-rw-r--r--.cirrus.yml33
-rw-r--r--.pre-commit-config.yaml2
-rw-r--r--Makefile16
-rw-r--r--RELEASE_PROCESS.md269
-rw-r--r--changelog.txt582
-rw-r--r--cmd/podman/common/create_opts.go39
-rw-r--r--cmd/podman/common/netflags.go2
-rw-r--r--cmd/podman/common/volumes.go36
-rw-r--r--cmd/podman/containers/cp.go54
-rw-r--r--cmd/podman/containers/create.go6
-rw-r--r--cmd/podman/images/build.go96
-rw-r--r--cmd/podman/root.go19
-rwxr-xr-xcontrib/cirrus/runner.sh4
-rwxr-xr-xcontrib/cirrus/setup_environment.sh1
-rw-r--r--contrib/spec/podman.spec.in2
-rw-r--r--docs/source/markdown/podman-attach.1.md4
-rw-r--r--docs/source/markdown/podman-build.1.md33
-rw-r--r--docs/source/markdown/podman-container-checkpoint.1.md4
-rw-r--r--docs/source/markdown/podman-container-cleanup.1.md4
-rw-r--r--docs/source/markdown/podman-container-restore.1.md4
-rw-r--r--docs/source/markdown/podman-container-runlabel.1.md2
-rw-r--r--docs/source/markdown/podman-cp.1.md2
-rw-r--r--docs/source/markdown/podman-create.1.md6
-rw-r--r--docs/source/markdown/podman-diff.1.md4
-rw-r--r--docs/source/markdown/podman-exec.1.md4
-rw-r--r--docs/source/markdown/podman-generate-systemd.1.md2
-rw-r--r--docs/source/markdown/podman-generate.1.md2
-rw-r--r--docs/source/markdown/podman-image-sign.1.md4
-rw-r--r--docs/source/markdown/podman-image-trust.1.md2
-rw-r--r--docs/source/markdown/podman-init.1.md4
-rw-r--r--docs/source/markdown/podman-inspect.1.md4
-rw-r--r--docs/source/markdown/podman-kill.1.md4
-rw-r--r--docs/source/markdown/podman-login.1.md2
-rw-r--r--docs/source/markdown/podman-logs.1.md4
-rw-r--r--docs/source/markdown/podman-manifest-add.1.md2
-rw-r--r--docs/source/markdown/podman-manifest-push.1.md2
-rw-r--r--docs/source/markdown/podman-mount.1.md4
-rw-r--r--docs/source/markdown/podman-network-reload.1.md4
-rw-r--r--docs/source/markdown/podman-play-kube.1.md4
-rw-r--r--docs/source/markdown/podman-pod-inspect.1.md4
-rw-r--r--docs/source/markdown/podman-pod-kill.1.md4
-rw-r--r--docs/source/markdown/podman-pod-pause.1.md4
-rw-r--r--docs/source/markdown/podman-pod-ps.1.md4
-rw-r--r--docs/source/markdown/podman-pod-restart.1.md4
-rw-r--r--docs/source/markdown/podman-pod-rm.1.md4
-rw-r--r--docs/source/markdown/podman-pod-start.1.md4
-rw-r--r--docs/source/markdown/podman-pod-stats.1.md4
-rw-r--r--docs/source/markdown/podman-pod-stop.1.md4
-rw-r--r--docs/source/markdown/podman-pod-top.1.md4
-rw-r--r--docs/source/markdown/podman-pod-unpause.1.md4
-rw-r--r--docs/source/markdown/podman-port.1.md4
-rw-r--r--docs/source/markdown/podman-ps.1.md4
-rw-r--r--docs/source/markdown/podman-pull.1.md2
-rw-r--r--docs/source/markdown/podman-push.1.md8
-rw-r--r--docs/source/markdown/podman-restart.1.md4
-rw-r--r--docs/source/markdown/podman-rm.1.md4
-rw-r--r--docs/source/markdown/podman-run.1.md6
-rw-r--r--docs/source/markdown/podman-start.1.md4
-rw-r--r--docs/source/markdown/podman-stats.1.md4
-rw-r--r--docs/source/markdown/podman-stop.1.md4
-rw-r--r--docs/source/markdown/podman-system-service.1.md6
-rw-r--r--docs/source/markdown/podman-top.1.md4
-rw-r--r--docs/source/markdown/podman-unmount.1.md4
-rw-r--r--docs/source/markdown/podman-wait.1.md3
-rw-r--r--docs/source/markdown/podman.1.md8
-rw-r--r--go.mod20
-rw-r--r--go.sum83
-rwxr-xr-xhack/podman-socat4
-rw-r--r--libpod/boltdb_state.go102
-rw-r--r--libpod/boltdb_state_internal.go4
-rw-r--r--libpod/container.go6
-rw-r--r--libpod/container_api.go73
-rw-r--r--libpod/container_copy_linux.go264
-rw-r--r--libpod/container_copy_unsupported.go16
-rw-r--r--libpod/container_internal.go71
-rw-r--r--libpod/container_internal_linux.go217
-rw-r--r--libpod/container_log.go1
-rw-r--r--libpod/container_log_linux.go1
-rw-r--r--libpod/container_path_resolution.go23
-rw-r--r--libpod/container_stat_linux.go181
-rw-r--r--libpod/container_stat_unsupported.go13
-rw-r--r--libpod/define/fileinfo.go16
-rw-r--r--libpod/define/mount.go12
-rw-r--r--libpod/define/version.go6
-rw-r--r--libpod/image/image.go33
-rw-r--r--libpod/image/image_test.go8
-rw-r--r--libpod/image/layer_tree.go8
-rw-r--r--libpod/image/pull.go26
-rw-r--r--libpod/in_memory_state.go40
-rw-r--r--libpod/kube.go12
-rw-r--r--libpod/networking_linux.go2
-rw-r--r--libpod/oci_conmon_linux.go20
-rw-r--r--libpod/options.go11
-rw-r--r--libpod/rootless_cni_linux.go2
-rw-r--r--libpod/runtime_ctr.go126
-rw-r--r--libpod/runtime_img.go12
-rw-r--r--libpod/runtime_img_test.go4
-rw-r--r--libpod/runtime_pod_infra_linux.go2
-rw-r--r--libpod/state.go13
-rw-r--r--libpod/storage.go5
-rw-r--r--nix/nixpkgs.json8
-rw-r--r--pkg/api/handlers/compat/containers.go39
-rw-r--r--pkg/api/handlers/compat/containers_stop.go4
-rw-r--r--pkg/api/handlers/compat/images.go123
-rw-r--r--pkg/api/handlers/compat/images_build.go56
-rw-r--r--pkg/api/handlers/compat/images_push.go121
-rw-r--r--pkg/api/handlers/compat/networks.go18
-rw-r--r--pkg/api/handlers/compat/secrets.go32
-rw-r--r--pkg/api/handlers/compat/version.go9
-rw-r--r--pkg/api/handlers/libpod/copy.go12
-rw-r--r--pkg/api/handlers/libpod/images.go1
-rw-r--r--pkg/api/handlers/libpod/images_pull.go3
-rw-r--r--pkg/api/handlers/utils/handler.go45
-rw-r--r--pkg/api/handlers/utils/handler_test.go5
-rw-r--r--pkg/api/server/handler_api.go7
-rw-r--r--pkg/api/server/register_archive.go2
-rw-r--r--pkg/api/server/register_pods.go1
-rw-r--r--pkg/api/server/register_secrets.go4
-rw-r--r--pkg/autoupdate/autoupdate.go1
-rw-r--r--pkg/bindings/bindings.go26
-rw-r--r--pkg/bindings/connection.go10
-rw-r--r--pkg/bindings/images/build.go28
-rw-r--r--pkg/bindings/test/attach_test.go2
-rw-r--r--pkg/bindings/test/common_test.go4
-rw-r--r--pkg/bindings/test/containers_test.go90
-rw-r--r--pkg/bindings/test/exec_test.go5
-rw-r--r--pkg/bindings/test/images_test.go2
-rw-r--r--pkg/bindings/test/info_test.go6
-rw-r--r--pkg/bindings/test/pods_test.go6
-rw-r--r--pkg/bindings/test/system_test.go17
-rw-r--r--pkg/checkpoint/checkpoint_restore.go36
-rw-r--r--pkg/checkpoint/crutils/checkpoint_restore_utils.go191
-rw-r--r--pkg/copy/fileinfo.go11
-rw-r--r--pkg/domain/entities/containers.go3
-rw-r--r--pkg/domain/entities/engine.go33
-rw-r--r--pkg/domain/entities/images.go2
-rw-r--r--pkg/domain/entities/secrets.go23
-rw-r--r--pkg/domain/infra/abi/archive.go163
-rw-r--r--pkg/domain/infra/abi/containers.go4
-rw-r--r--pkg/domain/infra/abi/containers_stat.go127
-rw-r--r--pkg/domain/infra/abi/images.go16
-rw-r--r--pkg/domain/infra/abi/network.go10
-rw-r--r--pkg/domain/infra/abi/play.go2
-rw-r--r--pkg/registries/registries.go5
-rw-r--r--pkg/specgen/generate/config_linux.go5
-rw-r--r--pkg/specgen/generate/oci.go30
-rw-r--r--pkg/specgen/generate/storage.go27
-rw-r--r--pkg/terminal/util.go4
-rw-r--r--pkg/tracing/tracing.go29
-rw-r--r--test/apiv2/01-basic.at2
-rw-r--r--test/apiv2/10-images.at4
-rw-r--r--test/apiv2/12-imagesMore.at4
-rw-r--r--test/apiv2/20-containers.at31
-rw-r--r--test/apiv2/30-volumes.at35
-rw-r--r--test/apiv2/35-networks.at66
-rw-r--r--test/apiv2/44-mounts.at21
-rw-r--r--test/apiv2/45-system.at8
-rw-r--r--test/apiv2/50-secrets.at13
-rw-r--r--test/apiv2/60-auth.at29
-rw-r--r--test/apiv2/rest_api/__init__.py4
-rw-r--r--test/apiv2/rest_api/test_rest_v2_0_0.py49
-rwxr-xr-xtest/apiv2/test-apiv2122
-rw-r--r--test/e2e/build_test.go16
-rw-r--r--test/e2e/config/containers.conf1
-rw-r--r--test/e2e/containers_conf_test.go22
-rw-r--r--test/e2e/generate_kube_test.go32
-rw-r--r--test/e2e/libpod_suite_remote_test.go6
-rw-r--r--test/e2e/libpod_suite_test.go6
-rw-r--r--test/e2e/login_logout_test.go8
-rw-r--r--test/e2e/logs_test.go110
-rw-r--r--test/e2e/network_connect_disconnect_test.go7
-rw-r--r--test/e2e/network_test.go23
-rw-r--r--test/e2e/rename_test.go21
-rw-r--r--test/e2e/run_selinux_test.go49
-rw-r--r--test/e2e/run_test.go36
-rw-r--r--test/e2e/stop_test.go4
-rw-r--r--test/python/__init__.py0
-rw-r--r--test/python/docker/__init__.py16
-rw-r--r--test/python/docker/build_labels/Dockerfile1
-rw-r--r--test/python/docker/compat/README.md (renamed from test/python/docker/README.md)12
-rw-r--r--test/python/docker/compat/__init__.py0
-rw-r--r--test/python/docker/compat/common.py (renamed from test/python/docker/common.py)6
-rw-r--r--test/python/docker/compat/constant.py (renamed from test/python/docker/constant.py)0
-rw-r--r--test/python/docker/compat/test_containers.py (renamed from test/python/docker/test_containers.py)11
-rw-r--r--test/python/docker/compat/test_images.py (renamed from test/python/docker/test_images.py)17
-rw-r--r--test/python/docker/compat/test_system.py (renamed from test/python/docker/test_system.py)3
-rw-r--r--test/python/requirements.txt6
-rw-r--r--test/system/030-run.bats4
-rw-r--r--test/system/050-stop.bats2
-rw-r--r--test/system/065-cp.bats315
-rw-r--r--test/system/070-build.bats118
-rw-r--r--test/system/120-load.bats7
-rw-r--r--test/system/410-selinux.bats21
-rwxr-xr-xtest/system/build-testimage52
-rw-r--r--test/system/helpers.bash9
-rw-r--r--test/upgrade/README.md87
-rw-r--r--test/upgrade/helpers.bash11
-rw-r--r--test/upgrade/test-upgrade.bats313
-rw-r--r--vendor/github.com/checkpoint-restore/checkpointctl/LICENSE (renamed from vendor/github.com/opentracing/opentracing-go/LICENSE)2
-rw-r--r--vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go221
-rw-r--r--vendor/github.com/containers/buildah/add.go26
-rw-r--r--vendor/github.com/containers/buildah/buildah.go2
-rw-r--r--vendor/github.com/containers/buildah/copier/copier.go107
-rw-r--r--vendor/github.com/containers/buildah/copier/syscall_unix.go8
-rw-r--r--vendor/github.com/containers/buildah/pkg/overlay/overlay.go10
-rw-r--r--vendor/github.com/coreos/go-iptables/iptables/iptables.go112
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go (renamed from vendor/github.com/coreos/go-systemd/v22/activation/files.go)2
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go (renamed from vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go)13
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go45
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/dbus/methods.go306
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/journal/journal.go179
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go208
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go35
-rw-r--r--vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go50
-rw-r--r--vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go33
-rw-r--r--vendor/github.com/nxadm/tail/.gitignore3
-rw-r--r--vendor/github.com/nxadm/tail/.travis.yml16
-rw-r--r--vendor/github.com/nxadm/tail/CHANGES.md12
-rw-r--r--vendor/github.com/nxadm/tail/README.md40
-rw-r--r--vendor/github.com/nxadm/tail/appveyor.yml11
-rw-r--r--vendor/github.com/nxadm/tail/go.mod3
-rw-r--r--vendor/github.com/nxadm/tail/go.sum8
-rw-r--r--vendor/github.com/nxadm/tail/tail.go73
-rw-r--r--vendor/github.com/nxadm/tail/tail_posix.go6
-rw-r--r--vendor/github.com/nxadm/tail/tail_windows.go9
-rw-r--r--vendor/github.com/nxadm/tail/util/util.go1
-rw-r--r--vendor/github.com/nxadm/tail/watch/filechanges.go1
-rw-r--r--vendor/github.com/nxadm/tail/watch/inotify.go1
-rw-r--r--vendor/github.com/nxadm/tail/watch/inotify_tracker.go1
-rw-r--r--vendor/github.com/nxadm/tail/watch/polling.go1
-rw-r--r--vendor/github.com/nxadm/tail/watch/watch.go1
-rw-r--r--vendor/github.com/nxadm/tail/winfile/winfile.go1
-rw-r--r--vendor/github.com/onsi/ginkgo/.travis.yml4
-rw-r--r--vendor/github.com/onsi/ginkgo/CHANGELOG.md5
-rw-r--r--vendor/github.com/onsi/ginkgo/README.md1
-rw-r--r--vendor/github.com/onsi/ginkgo/config/config.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/go.mod7
-rw-r--r--vendor/github.com/onsi/ginkgo/go.sum3
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go9
-rw-r--r--vendor/github.com/onsi/gomega/.travis.yml18
-rw-r--r--vendor/github.com/onsi/gomega/CHANGELOG.md13
-rw-r--r--vendor/github.com/onsi/gomega/Dockerfile1
-rw-r--r--vendor/github.com/onsi/gomega/Makefile37
-rw-r--r--vendor/github.com/onsi/gomega/docker-compose.yaml10
-rw-r--r--vendor/github.com/onsi/gomega/format/format.go41
-rw-r--r--vendor/github.com/onsi/gomega/gexec/build.go150
-rw-r--r--vendor/github.com/onsi/gomega/go.mod4
-rw-r--r--vendor/github.com/onsi/gomega/go.sum18
-rw-r--r--vendor/github.com/onsi/gomega/gomega_dsl.go2
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go22
-rw-r--r--vendor/github.com/onsi/gomega/matchers/with_transform.go19
-rw-r--r--vendor/github.com/opentracing/opentracing-go/.gitignore1
-rw-r--r--vendor/github.com/opentracing/opentracing-go/.travis.yml20
-rw-r--r--vendor/github.com/opentracing/opentracing-go/CHANGELOG.md63
-rw-r--r--vendor/github.com/opentracing/opentracing-go/Makefile20
-rw-r--r--vendor/github.com/opentracing/opentracing-go/README.md171
-rw-r--r--vendor/github.com/opentracing/opentracing-go/ext.go24
-rw-r--r--vendor/github.com/opentracing/opentracing-go/ext/field.go17
-rw-r--r--vendor/github.com/opentracing/opentracing-go/ext/tags.go215
-rw-r--r--vendor/github.com/opentracing/opentracing-go/globaltracer.go42
-rw-r--r--vendor/github.com/opentracing/opentracing-go/go.mod5
-rw-r--r--vendor/github.com/opentracing/opentracing-go/go.sum7
-rw-r--r--vendor/github.com/opentracing/opentracing-go/gocontext.go65
-rw-r--r--vendor/github.com/opentracing/opentracing-go/log/field.go282
-rw-r--r--vendor/github.com/opentracing/opentracing-go/log/util.go61
-rw-r--r--vendor/github.com/opentracing/opentracing-go/noop.go64
-rw-r--r--vendor/github.com/opentracing/opentracing-go/propagation.go176
-rw-r--r--vendor/github.com/opentracing/opentracing-go/span.go189
-rw-r--r--vendor/github.com/opentracing/opentracing-go/tracer.go304
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go36
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml161
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go15
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go2
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go13
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go2
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go4
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go12
-rw-r--r--vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go5
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.gitignore15
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.gitmodules3
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.travis.yml56
-rw-r--r--vendor/github.com/uber/jaeger-client-go/CHANGELOG.md351
-rw-r--r--vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md170
-rw-r--r--vendor/github.com/uber/jaeger-client-go/DCO37
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Gopkg.lock401
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Gopkg.toml31
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Makefile134
-rw-r--r--vendor/github.com/uber/jaeger-client-go/README.md324
-rw-r--r--vendor/github.com/uber/jaeger-client-go/RELEASE.md12
-rw-r--r--vendor/github.com/uber/jaeger-client-go/baggage_setter.go77
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config.go434
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config_env.go259
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/options.go165
-rw-r--r--vendor/github.com/uber/jaeger-client-go/constants.go106
-rw-r--r--vendor/github.com/uber/jaeger-client-go/contrib_observer.go56
-rw-r--r--vendor/github.com/uber/jaeger-client-go/doc.go24
-rw-r--r--vendor/github.com/uber/jaeger-client-go/glide.lock98
-rw-r--r--vendor/github.com/uber/jaeger-client-go/glide.yaml28
-rw-r--r--vendor/github.com/uber/jaeger-client-go/header.go65
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go101
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go157
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go71
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go25
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go81
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go99
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go216
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go32
-rw-r--r--vendor/github.com/uber/jaeger-client-go/interop.go55
-rw-r--r--vendor/github.com/uber/jaeger-client-go/jaeger_tag.go84
-rw-r--r--vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go181
-rw-r--r--vendor/github.com/uber/jaeger-client-go/logger.go53
-rw-r--r--vendor/github.com/uber/jaeger-client-go/metrics.go119
-rw-r--r--vendor/github.com/uber/jaeger-client-go/observer.go88
-rw-r--r--vendor/github.com/uber/jaeger-client-go/process.go29
-rw-r--r--vendor/github.com/uber/jaeger-client-go/propagation.go313
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reference.go23
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reporter.go322
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reporter_options.go71
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md5
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go63
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go124
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go101
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go171
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler.go516
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler_remote.go337
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go162
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler_v2.go93
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span.go487
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span_allocator.go56
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span_context.go387
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go435
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go18
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go154
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go18
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go410
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go873
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer.go491
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer_options.go182
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport.go38
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport/doc.go23
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport/http.go174
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport_udp.go193
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin.go77
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go329
-rw-r--r--vendor/github.com/uber/jaeger-lib/LICENSE201
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/counter.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/factory.go78
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/gauge.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/histogram.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/keys.go35
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/metrics.go137
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go43
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/timer.go33
-rw-r--r--vendor/go.uber.org/atomic/.codecov.yml19
-rw-r--r--vendor/go.uber.org/atomic/.gitignore12
-rw-r--r--vendor/go.uber.org/atomic/.travis.yml27
-rw-r--r--vendor/go.uber.org/atomic/CHANGELOG.md76
-rw-r--r--vendor/go.uber.org/atomic/LICENSE.txt19
-rw-r--r--vendor/go.uber.org/atomic/Makefile78
-rw-r--r--vendor/go.uber.org/atomic/README.md63
-rw-r--r--vendor/go.uber.org/atomic/bool.go81
-rw-r--r--vendor/go.uber.org/atomic/bool_ext.go53
-rw-r--r--vendor/go.uber.org/atomic/doc.go23
-rw-r--r--vendor/go.uber.org/atomic/duration.go82
-rw-r--r--vendor/go.uber.org/atomic/duration_ext.go40
-rw-r--r--vendor/go.uber.org/atomic/error.go51
-rw-r--r--vendor/go.uber.org/atomic/error_ext.go39
-rw-r--r--vendor/go.uber.org/atomic/float64.go76
-rw-r--r--vendor/go.uber.org/atomic/float64_ext.go47
-rw-r--r--vendor/go.uber.org/atomic/gen.go26
-rw-r--r--vendor/go.uber.org/atomic/go.mod8
-rw-r--r--vendor/go.uber.org/atomic/go.sum9
-rw-r--r--vendor/go.uber.org/atomic/int32.go102
-rw-r--r--vendor/go.uber.org/atomic/int64.go102
-rw-r--r--vendor/go.uber.org/atomic/nocmp.go35
-rw-r--r--vendor/go.uber.org/atomic/string.go54
-rw-r--r--vendor/go.uber.org/atomic/string_ext.go43
-rw-r--r--vendor/go.uber.org/atomic/uint32.go102
-rw-r--r--vendor/go.uber.org/atomic/uint64.go102
-rw-r--r--vendor/go.uber.org/atomic/value.go31
-rw-r--r--vendor/k8s.io/client-go/LICENSE202
-rw-r--r--vendor/k8s.io/client-go/util/homedir/homedir.go47
-rw-r--r--vendor/modules.txt43
-rw-r--r--version/version.go39
384 files changed, 5982 insertions, 17102 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index db2e86d83..4e7893e3b 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -598,6 +598,38 @@ rootless_system_test_task:
main_script: *main
always: *logs_artifacts
+# FIXME: we may want to consider running this from nightly cron instead of CI.
+# The tests are actually pretty quick (less than a minute) but they do rely
+# on pulling images from quay.io, which means we're subject to network flakes.
+#
+# FIXME: how does this env matrix work, anyway? Does it spin up multiple VMs?
+# We might just want to encode the version matrix in runner.sh instead
+upgrade_test_task:
+ name: "Upgrade test: from $PODMAN_UPGRADE_FROM"
+ alias: upgrade_test
+ skip: *tags
+ only_if: *not_docs
+ depends_on:
+ - local_system_test
+ matrix:
+ - env:
+ PODMAN_UPGRADE_FROM: v1.9.0
+ - env:
+ PODMAN_UPGRADE_FROM: v2.0.6
+ - env:
+ PODMAN_UPGRADE_FROM: v2.1.1
+ gce_instance: *standardvm
+ env:
+ TEST_FLAVOR: upgrade_test
+ DISTRO_NV: ${FEDORA_NAME}
+ VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
+ # ID for re-use of build output
+ _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID}
+ clone_script: *noop
+ gopath_cache: *ro_gopath_cache
+ setup_script: *setup
+ main_script: *main
+ always: *logs_artifacts
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
@@ -654,6 +686,7 @@ success_task:
- local_system_test
- remote_system_test
- rootless_system_test
+ - upgrade_test
- meta
container: *smallcontainer
env:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4de8e4c12..f44b0ea42 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,7 +2,7 @@
exclude: ^vendor/
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks.git
- rev: v2.4.0
+ rev: v3.4.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
diff --git a/Makefile b/Makefile
index 4c2a4047b..14e428397 100644
--- a/Makefile
+++ b/Makefile
@@ -543,14 +543,21 @@ install.cni:
install ${SELINUXOPT} -m 644 cni/87-podman-bridge.conflist ${DESTDIR}${ETCDIR}/cni/net.d/87-podman-bridge.conflist
.PHONY: install.docker
-install.docker: docker-docs
- install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+install.docker:
install ${SELINUXOPT} -m 755 docker $(DESTDIR)$(BINDIR)/docker
- install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR} ${DESTDIR}${TMPFILESDIR}
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-docker.conf -t ${DESTDIR}${TMPFILESDIR}
+.PHONY: install.docker-docs-nobuild
+install.docker-docs-nobuild:
+ install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
+
+.PHONY: install.docker-docs
+install.docker-docs: docker-docs install.docker-docs-nobuild
+
.PHONY: install.systemd
+ifneq (,$(findstring systemd,$(BUILDTAGS)))
install.systemd:
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR}
# User services
@@ -563,6 +570,9 @@ install.systemd:
install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.timer
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${SYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${SYSTEMDDIR}/podman.service
+else
+install.systemd:
+endif
.PHONY: uninstall
uninstall:
diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md
new file mode 100644
index 000000000..52c08c3f2
--- /dev/null
+++ b/RELEASE_PROCESS.md
@@ -0,0 +1,269 @@
+# Podman Releases
+
+## Overview
+
+Podman (and podman-remote) versioning is mostly based on [semantic-versioning
+standards](https://semver.org).
+Significant versions
+are tagged, including *release candidates* (`rc`).
+All relevant **minor** releases (`vX.Y`) have their own branches. The **latest**
+development efforts occur on the *master* branch. Branches with a
+*rhel* suffix are used for long-term support of downstream RHEL releases.
+
+## Release workflow expectations
+
+* You have push access to the [upstream podman repository](https://github.com/containers/podman.git)
+* You understand all basic `git` operations and concepts, like creating commits,
+ local vs. remote branches, rebasing, and conflict resolution.
+* You have access to your public and private *GPG* keys.
+* You have reliable internet access (i.e. not the public WiFi link at McDonald's)
+* Other podman maintainers are online/available for assistance if needed.
+* For a **major** release, you have 4-8 hours of time available, most of which will
+ be dedicated to writing release notes.
+* For a **minor** or **patch** release, you have 2-4 hours of time available
+ (minimum depends largely on the speed/reliability of automated testing)
+
+# Releases
+
+## Major (***X***.y.z) release
+
+These releases always begin from *master*, and are contained in a branch
+named with the **major** and **minor** version. **Major** release branches
+begin in a *release candidate* phase, with prospective release tags being
+created with an `-rc` suffix. There may be multiple *release candidate*
+tags before the final/official **major** version is tagged and released.
+
+## Significant minor (x.**Y**.z) and patch (x.y.**Z**) releases
+
+Significant **minor** and **patch** level releases are normally
+branched from *master*, but there are occasional exceptions.
+Additionally, these branches may be named with `-rhel` (or another)
+suffix to signify a specialized purpose. For example, `-rhel` indicates
+a release intended for downstream *RHEL* consumption.
+
+## Unreleased Milestones
+
+Non-release versions may occasionally appear tagged on a branch, without
+the media postings or artifact distribution that accompany a typical (major)
+release. For example, as required for the (separate) RHEL release process.
+Otherwise these tags are simply milestones for reference purposes and may
+generally be safely ignored.
+
+## Process
+
+***Note:*** This is intended as a guideline and generalized process.
+Not all steps are applicable in all situations, and not every step is
+spelled out in complete detail.
+
+1. Make a `[CI:DOCS]` release notes pull request.
+
+ 1. Ensure any/all intended PRs are completed and merged prior to any
+ processing of release notes. Ensure your local clone is fully up to
+ date with the remote upstream (`git remote update`).
+ 1. Check out (create) a local working branch for a release-notes PR,
+ based on the latest `upstream/master` or a pre-existing version-named
+ branch - for example, if this is an additional *release-candidate*
+ you might use `vX.Y.Z-rc2`. **Note:** this is a local branch name;
+ an upstream branch would never contain the `-rc?` suffix.
+ 1. Find all PRs merged by the merge-robot since the last release.
+ For example, given the commit range `1234...5678`
+ you would run `git log --oneline --author=openshift-merge-robot 1234...5678`.
+ Keep this list open/available for reference as you edit.
+ 1. Edit `RELEASE_NOTES.md`
+
+ * If operating on a *release-candidate*, be sure to remove any
+ not-applicable items/sections. For example, those brought in
+ because of backports.
+ * Add/update the version section, with sub-sections for *Features*
+ (new functionality), *Changes* (Altered podman behaviors),
+ *Bugfixes* (self-explanatory), *API* (All related features,
+ changes, and bugfixes), and *Misc* (include any **major**
+ library bumps, e.g. `c/buildah`, `c/storage`, `c/common`, etc).
+ * Use your merge-bot reference PR-listing to examine each PR in turn,
+ adding an entry for it into the appropriate section.
+
+ * Be sure to link any issue the PR fixed.
+ * Do not include any PRs that are only documentation or test/automation
+ changes.
+ * Do not include any PRs that fix bugs which we introduced due to
+ new features/enhancements. In other words, if it was working, broke, then
+ got fixed, there's no need to mention those items.
+
+ 1. Commit and **sign** the `RELEASE_NOTES.md` changes, using the description
+ `Create release notes for vX.Y.Z` (where `X`, `Y`, and `Z` are the
+ actual version numbers).
+ 1. Push your working branch to your GitHub fork and create a new pull request
+ (see the illustrative commands at the end of this step).
+
+ * ***Ensure*** you properly select the base branch if not *master*.
+ For example, `vX.Y`.
+ * ***Before submitting*** the new PR, update the title with the
+ prefix `[CI:DOCS]` to avoid triggering lengthy automated testing.
+
+ 1. If this is a release on a pre-existing version-named branch
+ (e.g. *release-candidate* or `-rhel`), open another PR against
+ the upstream *master* branch. This is needed to ensure the new
+ notes are present for future releases.
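+
+ As a rough illustration of the push-and-PR steps above (assuming your
+ fork's remote is named `origin`; branch and base names will vary):
+
+ ```console
+ $ git push origin my-release-notes-branch
+ # then open a PR on GitHub with the base branch set to vX.Y (or master),
+ # and prefix the title with [CI:DOCS]
+ ```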
+
+
+1. Create a new upstream release branch (if none already exists).
+
+ 1. After the release-notes pull requests have merged, a release branch is
+ needed. Branching ensures all changes are curated before inclusion in the
+ release, and no new features land after the *release-candidate* phases
+ are complete.
+ 1. Ensure your local clone is fully up to date with the remote upstream
+ (`git remote update`), then check out the upstream *master* branch (`git checkout upstream/master`).
+ 1. Make a new local branch for the release based on *master*. For example,
+ `git checkout -b vX.Y`, where `X.Y` represents the complete release
+ version name, including any suffix (such as `-rhel`). ***DO NOT***
+ include any `-rc` suffix in the branch name.
+ 1. Edit the `.cirrus.yml` file, changing the `DEST_BRANCH` value (under the
+ `env` section) to the new, complete branch name (e.g. `vX.Y`).
+ Commit and sign, using the description
+ `Cirrus: Update operating branch` (see the consolidated example at the
+ end of this step).
+ 1. Push the new branch otherwise unmodified (`git push upstream vX.Y`).
+ 1. Automation will begin executing on the branch immediately. Because
+ the repository allows out-of-sequence PR merging, it is possible that
+ merge order introduced bugs/defects. To establish a clean
+ baseline, observe the initial CI run on the branch for any unexpected
+ failures. This can be done by going directly to
+ `https://cirrus-ci.com/github/containers/podman/vX.Y`
+ 1. If there are CI test or automation issues that need fixing on the branch,
+ attend to them using the normal PR process (to *master* first, then backport
+ changes to the new branch). Ideally, CI should be "green" on the new
+ branch before proceeding.
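+
+ Putting the branch-creation commands above together, an illustrative
+ sequence (substitute the real branch name for `vX.Y`) is:
+
+ ```console
+ $ git remote update
+ $ git checkout upstream/master
+ $ git checkout -b vX.Y                # no -rc suffix in the branch name
+ $ $EDITOR .cirrus.yml                 # set DEST_BRANCH to vX.Y
+ $ git commit -a -s -S -m 'Cirrus: Update operating branch'
+ $ git push upstream vX.Y
+ ```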
+
+1. Create a new branch-verification Cirrus-Cron entry.
+
+ 1. This is to ensure CI's VM image timestamps are refreshed. Without this,
+ the VM images ***will*** be permanently pruned after 60 days of inactivity
+ and are hard/impossible to re-create accurately.
+ 1. Go to
+ [https://cirrus-ci.com/github/containers/podman](https://cirrus-ci.com/github/containers/podman)
+ and press the "gear" (Repository Settings) button on the top-right.
+ 1. At the bottom of the settings page is a table of cron-job names, branches,
+ schedule, and recent status. Below that is an editable new-entry line.
+ 1. Set the new job's `name` and `branch` to the name of new release branch.
+ 1. Set the `expression` using the form `X X X ? * 1-6`, where `X` is a number
+ between 0-23 that is not already taken by another job in the table (for
+ example, `3 3 3 ? * 1-6`). The 1-hour interval is used because the job
+ takes about that long to run.
+ 1. Add the new job by pressing the `+` button on the right-side of the
+ new-entry line.
+
+1. Update version numbers and push tag
+
+ **TODO:** This process can be simplified by updating the script for the
+ "Optional Release Test" such that it tests the first commit, not the second.
+ In this way, pushing twice to the same PR won't be required.
+
+ 1. Assuming CI tests and automation ran clean on the release branch,
+ update your local repo to be fully up to date with the remote upstream
+ (`git remote update`). Check out a local copy of the upstream
+ release branch (`git checkout upstream/vX.Y`).
+ 1. Create a new local working-branch to develop the release PR,
+ `git checkout -b bump_vX.Y.Z`.
+ 1. Look up the *COMMIT ID* of the last release,
+ `git log -1 $(git tag | sort -V | tail -1)`.
+ 1. Run `make changelog CHANGELOG_BASE=`*COMMIT ID*. This will modify the
+ `changelog.txt` file. Manually edit it to change the first line
+ (“Changelog for …”) to include the current (new) release version number.
+ For example, `- Changelog for v2.1.0 (2020-09-22):` (see the consolidated
+ example at the end of this step).
+ 1. Edit `version/version.go` and bump the `Version` value to the new
+ release version. If there were API changes, also bump the `APIVersion` value.
+ 1. Commit this and sign the commit (`git commit -a -s -S`). The commit message
+ should be `Bump to vX.Y.Z` (using the actual version numbers).
+ 1. Push this single change to your GitHub fork, and make a new PR,
+ **being careful** to select the proper release branch as its base.
+ 1. Wait for all automated tests to pass (including on an RC-branch PR),
+ re-running and/or updating code as needed.
+ 1. In the PR, under the *Checks* tab, locate and click on the Cirrus-CI
+ task `Optional Release Test`. In the right-hand window pane, click
+ the `trigger` button and wait for the test to go green. *This is a
+ critical step* which confirms the commit is worthy of becoming a release.
+ 1. Tag the `Bump to vX.Y.Z` commit as a release by running
+ `git tag -s -m 'vX.Y.Z' vX.Y.Z $HASH` where `$HASH` is specified explicitly
+ and carefully, since a mistaken tag is (basically) unfixable once pushed.
+ 1. Change `version/version.go` again. This time, bump the **patch** version and
+ re-add the `-dev` suffix to indicate this is a non-released version of Podman.
+ 1. Change `contrib/spec/podman.spec.in`, bumping the **patch** number of `Version`.
+ 1. Commit these changes with the message `Bump to X.Y.Z-dev`.
+ 1. Push your local branch to your github fork (and the PR) again.
+ 1. The PR should now have two commits that look very similar to
+ https://github.com/containers/podman/pull/7787
+ 1. Wait for at least all the "Build" and "Verify" (or similar) CI testing
+ steps to complete successfully. No need to wait for complete integration
+ and system-testing (it was already done on substantially the same code, above).
+ 1. Merge the PR (or ask someone else to review and merge, to be safer).
+ 1. **Note:** This is the last point where any test-failures can be addressed
+ by code changes. After pushing the new version-tag upstream, no further
+ changes can be made to the code without lots of unpleasant effort. Please
+ seek assistance if needed, before proceeding.
+
+ 1. Assuming the "Bump to ..." PR merged successfully, and you're **really**
+ confident the correct commit has been tagged, push it with
+ `git push upstream vX.Y.Z`
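+
+ As a consolidated illustration of the changelog and version-bump commands
+ above (substitute real version numbers and the actual commit ID):
+
+ ```console
+ $ git remote update && git checkout upstream/vX.Y
+ $ git checkout -b bump_vX.Y.Z
+ $ git log -1 $(git tag | sort -V | tail -1)    # note the COMMIT ID
+ $ make changelog CHANGELOG_BASE=<COMMIT ID>
+ $ $EDITOR changelog.txt version/version.go     # bump Version (and APIVersion if needed)
+ $ git commit -a -s -S -m 'Bump to vX.Y.Z'
+ ```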
+
+1. Locate and verify that release testing is proceeding
+
+ 1. When the tag was pushed, an automated build was created. Locate this
+ by starting from
+ `https://github.com/containers/podman/tags` and finding the recent entry
+ for the pushed tag. Under the tag name will be a timestamp and abbreviated
+ commit hash, for example `<> 5b2585f`. Click the commit-hash link.
+ 1. In the upper-left-most corner, just to the left of the "Bump to vX.Y"
+ text, will be a small status icon (yellow circle, red "X", or green check).
+ Click this to open a small pop-up/overlay window listing all the status
+ checks.
+ 1. In the small pop-up/overlay window, press the "Details" link on one of the
+ Cirrus-CI status check entries (doesn't matter which one).
+ 1. On the following page, in the lower-right pane, will be a "View more details
+ on Cirrus CI" link, click this.
+ 1. A Cirrus-CI task details page will open, click the button labeled
+ "View All Tasks".
+ 1. Keep this page open to monitor its progress and for use in future steps.
+
+1. Bump master `-dev` version
+
+ 1. If you made a release branch and bumped the **major** or **minor** version,
+ complete the "Update version numbers and push tag" steps above on the
+ *master* branch. Bump the **minor** version and set the **patch**
+ version to 0. For example, after pushing the v2.2.0 release, *master*
+ should be set to v2.3.0-dev.
+ 1. Create a "Bump to vX.Y.Z-dev" commit with these changes.
+ 1. Bump the version number in `README.md` (still on *master*)
+ to reflect the new release. Commit these changes.
+ 1. Create a PR with the above commits, and oversee its merging.
+
+1. Create a GitHub Release entry and upload assets
+
+ 1. Return to the Cirrus-CI Build page for the new release tag, confirm
+ (or wait for) it to complete, re-running any failed tasks as appropriate.
+ 1. For anything other than an RC, download the new release artifacts
+ (the binaries which were actually tested). Visit each of the
+ "Build for ...", "Static Build", and "... Cross" tasks.
+ 1. Under the "Artifacts" section of each task, click the "gosrc" item,
+ find and download the release archive (`.zip`, `.tar.gz`, or `.msi`).
+ Save the archive with a meaningful name, for example
+ `podman-v3.0.0.msi`.
+ 1. For the "Static Build" task, find the compiled `podman` and `podman-remote`
+ binaries under the "binary", "bin" links. Tar these files as
+ `podman-static.tar.gz` (see the example at the end of this section).
+ 1. In the directory where you downloaded the archives, run
+ `sha256sum *.tar.gz *.zip *.msi > shasums` to generate SHA sums.
+ 1. Go to `https://github.com/containers/podman/releases/tag/vX.Y.Z` and
+ press the "Edit Release" button. Change the name to the form `vX.Y.Z`
+ 1. If this is a release candidate, be certain to click the pre-release
+ checkbox at the bottom of the page.
+ 1. Copy and paste the release notes for the release into the body of
+ the release.
+ 1. Near the bottom of the page there is a box with the message
+ “Add binaries by dropping them here or selecting them”. Use
+ that to upload the artifacts you previously downloaded, including
+ the `shasums` file.
+
+ * podman-remote-release-darwin.zip
+ * podman-remote-release-windows.zip
+ * podman-remote-static.tar.gz
+ * podman-vX.Y.Z.msi
+ * shasums
+ 1. Save the release.
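+
+ As a rough, illustrative sequence for the asset-packaging steps above
+ (run in the directory where you downloaded the artifacts; file names
+ will vary by release):
+
+ ```console
+ $ tar -czf podman-static.tar.gz podman podman-remote   # binaries from the "Static Build" task
+ $ sha256sum *.tar.gz *.zip *.msi > shasums
+ ```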
diff --git a/changelog.txt b/changelog.txt
index 326f52718..324826288 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,585 @@
+- Changelog for v3.1.0-rc1 (2021-03-08)
+ * Compat API: Avoid trying to create volumes if they already exist
+ * Bump github.com/onsi/gomega from 1.10.5 to 1.11.0
+ * Allow users to generate a kubernetes yaml off non running containers
+ * Bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1
+ * turn hidden --trace into a NOP
+ * pkg/terminal: use c/storage/pkg/homedir
+ * build-arg
+ * Handle podman build --dns-search
+ * podman build --build-arg should fall back to environment
+ * Add support for podman build --ignorefile
+ * replace local mount consts with libpod/define
+ * separate file with mount consts in libpod/define
+ * Correct compat images/{name}/push response
+ * [NO TESTS NEEDED] Bump pre-commit-hooks version
+ * [ci skip] Bad formatting fix in build documentation
+ * Bump github.com/containernetworking/plugins to v0.9.1
+ * podman-remote stop -time 0 does not work
+ * Do not return from c.stop() before re-locking
+ * Fix for podman network rm (-f) workflow
+ * Bump github.com/containers/buildah from 1.19.6 to 1.19.7
+ * Add tests for selinux kvm/init labels
+ * Respect NanoCpus in Compat Create
+ * podman cp: support copying on tmpfs mounts
+ * image removal: ignore unknown-layer errors
+ * Fix cni teardown errors
+ * Use version package to track all versions
+ * Check for supportsKVM based on basename of the runtime
+ * Compat API: create volume source dirs on the host
+ * Makefile: add install.docker-docs-nobuild for packaging use
+ * Add /sys/fs/cgroup as readonly path in docs
+ * Add network summary to compat ps
+ * Fix possible panic with podman build --iidfile
+ * Add version field to secret compat list/inspect api
+ * Tidy duplicate log tests
+ * Fix support for podman build --timestamp
+ * Rewrite Rename backend in a more atomic fashion
+ * Use functions and defines from checkpointctl
+ * Move checkpoint/restore code to pkg/checkpoint/crutils
+ * Vendor in checkpointctl
+ * Support label type dict on compat build
+ * Makefile: install systemd services conditionally
+ * podman-system-service.1.md: fix timeout example
+ * swagger: update the libpodPutArchive operation verb
+ * Makefile: split install.docker-docs from install.docker
+ * Bump RootlessKit v0.14.0-beta.0
+ * Compat api containers/json Ports field is null
+ * Bump github.com/cri-o/ocicni to latest master
+ * Refactor python tests to run against python3.9
+ * APIv2 tests: make more maintainable
+ * [CI:DOCS] Improve release process docs
+ * podman rmi: handle corrupted storage better
+ * Enable cgroupsv2 rw mount via security-opt unmask
+ * podman-image-sign.1.md: typo fix
+ * compat api network ls accept both format options
+ * Enable no_hosts from containers.conf
+ * Correct compat images/create?fromImage response
+ * Fix parsing of Tmpfs field in compat create
+ * prune remotecommand dependency
+ * system test image: build it multiarch
+ * Updated based on Jhonce comments
+ * updated common to 0.35.0
+ * Refactored file
+ * swagger: removes the schema type for PodSpecGenerator $ref
+ * podman-system-connection.1.md: fix copy/paste error
+ * Add dns search domains from cni response to resolv.conf
+ * Network connect error if net mode is not bridge
+ * Sort CapDrop in inspect to guarantee order
+ * podman upgrade tests
+ * test: ignore named hierarchies for cgroups=split
+ * container removal: handle already removed containers
+ * Bump github.com/rootless-containers/rootlesskit from 0.13.1 to 0.13.2
+ * Bump k8s.io/apimachinery from 0.20.3 to 0.20.4
+ * Add U volume flag to chown source volumes
+ * Replace Labels and Options nulls with {} in NetworkResource
+ * Cirrus: Temp. disable prior-fedora (F32) testing
+ * podman cp: test /dev/stdin correctly
+ * podman cp: treat /dev/stdout correctly
+ * cgroup: change cgroup deletion logic on v1
+ * Fix podman network IDs handling
+ * pr-should-include-tests: recognized "renamed" tests
+ * --no-header flag implementation for generate systemd
+ * [NO TESTS NEEDED] Make binding util internal
+ * Two variations of --new flag added to e2e
+ * swagger: add missing schema properties
+ * bump go module to v3
+ * Fix 'storage.options' indent
+ * Bump github.com/sirupsen/logrus from 1.7.1 to 1.8.0
+ * Bump github.com/containers/buildah from 1.19.4 to 1.19.6
+ * Turn on journald and k8s file logging tests
+ * Allow podman play kube to read yaml file from stdin
+ * Log working dir when chdir fails
+ * Fix segfault in run with memory-swap
+ * leak fix in rootless_linux.c fcn can_use_shortcut
+ * Fix journald logs with more than 1 container
+ * Fix journald logs --follow
+ * Fix journald logs --since
+ * fix journald logs --tail 0
+ * [CI:DOCS]basic networking guide
+ * cp: treat "." and "/." correctly
+ * [CI:DOCS] [NO TESTS NEEDED] Update swagger doc for libpod container wait
+ * Bump k8s.io/apimachinery from 0.20.2 to 0.20.3
+ * Don't switch on a single case
+ * Quote URL
+ * bindings: support simple types that implement fmt.Stringer interface
+ * API: fix libpod's container wait endpoint condition conversion
+ * Change source path resolution for volume copy-up
+ * podman ps --format '{{ .Size }}' requires --size option
+ * infra: downgrade warning to debug
+ * Ignore entrypoint=[\"\"]
+ * Bump github.com/sirupsen/logrus from 1.7.0 to 1.7.1
+ * Add missing early returns in compat API
+ * Do not reset storage when running inside of a container
+ * podman kill should report rawInput not container id
+ * Fix an issue where copyup could fail with ENOENT
+ * do not set empty $HOME
+ * images/create: always pull image
+ * Fix panic in pod creation
+ * Bump github.com/rootless-containers/rootlesskit from 0.13.0 to 0.13.1
+ * podman build: pass runtime to buildah
+ * correct startup error message
+ * Add missing params for podman-remote build
+ * Fix typo podman run doc in flag -pid=mode "efault"
+ * When stopping a container, print rawInput
+ * fix create container: handle empty host port
+ * Don't chown workdir if it already exists
+ * Fix broken podman generate systemd --new with pods
+ * fix dns resolution on ubuntu
+ * e2e: fix network alias test
+ * fix failing image e2e test
+ * Update troubleshooting.md
+ * [NO TESTS NEEDED] Refactor generated code
+ * Fix superfluous response.WriteHeader call in WaitContainerLibpod()
+ * change ps Created to unix
+ * Enable more golangci-lint linters
+ * make layer-tree lookup errors non-fatal
+ * Enable whitespace linter
+ * Enable golint linter
+ * Enable stylecheck linter
+ * Update Master to reflect the 3.0 release
+ * utils: takes the longest path on cgroup v1
+ * container ps json format miscue
+ * Bump github.com/spf13/cobra from 1.1.2 to 1.1.3
+ * utils: create parent cgroups
+ * utils: ignore unified on cgroupv1 if not present
+ * utils: skip empty lines
+ * Correct compat network prune response
+ * Display correct value for unlimited ulimit
+ * apiv2: handle docker-java clients pulling
+ * Rewrite copy-up to use buildah Copier
+ * bump to v3.1.0-dev
+ * [NO TESTS NEEDED] Update linter
+ * Bump github.com/spf13/cobra from 1.1.1 to 1.1.2
+ * Add shell completion tests for secrets
+ * Docker APIv2 push sends digest in response body
+ * Fix compat networks endpoint for a empty result
+ * hardening flags for fedora rpmbuilds
+ * [CI:DOCS]First pass at release process
+ * Restart service when CONTAINERS_CONF changes
+ * Support annotations from containers.conf
+ * vendor github.com/containers/image v5.10.2
+ * APIv2 tests: lots of cleanup
+ * Fix Docker APIv2 push endpoint
+ * generate kube: support --privileged
+ * Bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0
+ * Implement Secrets
+ * Bump containers/buildah to v1.19.4
+ * Allow path completion for podman create/run --rootfs
+ * Cirrus: Send cirrus-cron report e-mail to list.
+ * make `podman rmi` more robust
+ * Implement missing arguments for podman build
+ * vendor latest containers/common
+ * add network prune
+ * fix logic when not creating a workdir
+ * Bump remote API version to 3.0.0
+ * play kube selinux test case
+ * Fix podman network disconnect wrong NetworkStatus number
+ * Fix per review request
+ * generate kube: handle entrypoint
+ * play kube selinux test case
+ * Increase timeouts in some tests
+ * Add test for Docker APIv2 wait
+ * Implement Docker wait conditions
+ * Improve ContainerEngine.ContainerWait()
+ * Improve container libpod.Wait*() functions
+ * Cirrus: Collect ginkgo node logs artifacts
+ * Bump github.com/containers/storage from 1.24.5 to 1.25.0
+ * Bump github.com/containernetworking/cni from 0.8.0 to 0.8.1
+ * bindings: attach: warn correct error
+ * Fix invalid wait condition on kill
+ * Makefile: make bin/* real targets!
+ * typo
+ * Bump github.com/onsi/gomega from 1.10.4 to 1.10.5
+ * Update nix pin with `make nixpkgs`
+ * System test for #9096 (truncated stdout)
+ * play kube selinux label test case
+ * Gating tests: diff test: workaround for RHEL8 failure
+ * [NO TESTS NEEDED] style: indendation
+ * [NO TESTS NEEDED] fixup: remove debug code
+ * Report StatusConflict on Pod opt partial failures
+ * Honor network options for macvlan networks
+ * Make slirp MTU configurable (network_cmd_options)
+ * [NO TESTS NEEDED] Generated files
+ * [NO TESTS NEEDED] Improve generator
+ * play kube selinux label issue
+ * Makefile: refactor ginkgo * ginkgo-remote
+ * Allow pods to use --net=none
+ * Bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0
+ * Update release notes for v3.0.0
+ * New 'make completions' target
+ * add macvlan as a supported network driver
+ * Fix podman generate systemd --new special char handling
+ * Bump github.com/rootless-containers/rootlesskit from 0.12.0 to 0.13.0
+ * Endpoint that lists containers does not return correct Status value
+ * Fix --network parsing for podman pod create
+ * list volumes before pruning
+ * Docker ignores mount flags that begin with constency
+ * podman generate kube ignores --network=host
+ * Switch podman stop/kill/wait handlers to use abi
+ * [CI:DOCS]build instructions for macOS
+ * Vendor in containers/buildah v1.19.3
+ * Honor custom DNS in play|generate kube
+ * Podman-remote push can support --format
+ * Bump github.com/containers/image/v5 from 5.10.0 to 5.10.1
+ * Cirrus: Build static podman-remote
+ * podman build --pull: refine help message and docs
+ * Revert "podman build --pull: use correct policy"
+ * Bump github.com/containers/image/v5 from 5.9.0 to 5.10.0
+ * Cleanup bindings for image pull
+ * Don't fail if one of the cgroups is not setup
+ * Add support for rootless network-aliases
+ * Allow static ip and mac with rootless cni network
+ * podman build --pull: use correct policy
+ * Cirrus: Fix running Validate task on branches
+ * Fix static build cache by using cachix
+ * Switch podman image push handlers to use abi
+ * e2e tests: synchronize test results
+ * podman-remote ps --external --pod --sort do not work.
+ * Fix podman history --no-trunc for the CREATED BY field
+ * remote exec: write conmon error on hijacked connection
+ * Fix #9100 Change console mode message to debug
+ * Add default net info in container inspect
+ * Ensure the Volumes field in Compat Create is honored
+ * [CI:DOCS]update state of restful service
+ * workdir presence checks
+ * libpod: add (*Container).ResolvePath()
+ * Fixup search
+ * Pass DefaultMountsFile to podman build
+ * Ensure shutdown handler access is syncronized
+ * System tests: cover gaps from the last month
+ * Fix --arch and --os flags to work correctly
+ * Bump github.com/google/uuid from 1.1.5 to 1.2.0
+ * Fix typo
+ * disable dnsname when --internal
+ * swagger.go: Fix compilation error
+ * Fix fish completion issue if the command is prefixed with a space
+ * Bump golang.org/x/crypto
+ * networking: lookup child IP in networks
+ * Small API test improvement for compatibility search endpoint
+ * podman manifest exists
+ * Accept and ignore 'null' as value for X-Registry-Auth
+ * Turn on some remote test
+ * Add a notice to remove pod before starting service
+ * libpod: move slirp magic IPs to consts
+ * rootlessport: set source IP to slirp4netns device
+ * vendor: update rootlesskit to v0.12.0
+ * api: fix import image swagger definition
+ * podman volume exists
+ * Cirrus: Upload swagger YAML in every context
+ * [CI:DOCS] Cirrus: Skip smoke task on branch-push
+ * Move the cni lock file into the cni config dir
+ * Use random network names in the e2e tests
+ * [CI:DOCS] Update project name in Code of Conduct
+ * Set log driver for compatability containers
+ * Make generate systemd --new robust against double curly braces
+ * Fix man page for fuse-overlayfs config in rootless mode
+ * Cirrus: add bindings checks
+ * Fix handling of container remove
+ * make bindings generation explicit
+ * make bindings generation more robuts
+ * Revert "ginkgo: install on demand via `go get -u`"
+ * [CI:DOCS] fix go-md2man HTMLSpan warnings
+ * CI: smoke test: insist on adding tests on PRs
+ * podman network exists
+ * ginkgo: install on demand via `go get -u`
+ * runner.sh : deal with bash 'set -e'
+ * Add binding options for container|pod exists
+ * [CI:DOCS]Do not run compose tests with CI:DOCS
+ * simplify bindings generation
+ * make: generate bindings: use vendor
+ * hack/install_golangci.sh: smarter install
+ * golangci-lint: install to ./bin
+ * Create release notes for V3.0.0
+ * Rename AutocompletePortCommand func
+ * Allow podman push to push manifest lists
+ * [CI:DOCS]Add README.md for golang bindings
+ * Turn on podman pod stats test for rootless cgroup v2
+ * Fix missing podman-container-rename man page link
+ * Container rename bindings
+ * Bump to containers/buildah 1.9.2
+ * Bump github.com/google/uuid from 1.1.4 to 1.1.5
+ * specgen: improve heuristic for /sys bind mount
+ * Initial implementation of renaming containers
+ * Add tests for volume plugins
+ * Initial implementation of volume plugins
+ * [CI:DOCS] Add hook-script example to get_ci_vm.sh
+ * Makefile: add target to generate bindings
+ * container stop: release lock before calling the runtime
+ * Bump github.com/cri-o/ocicni to latest master
+ * Cirrus: Upd. ext. service check host list
+ * Bump k8s.io/apimachinery from 0.20.1 to 0.20.2
+ * Bump github.com/stretchr/testify from 1.6.1 to 1.7.0
+ * Cirrus: Utilize $GOPATH cache for alt_build task
+ * Add more information and examples on podman and pipes
+ * Vendor in common 0.33.1
+ * CI: fix broken diagnostic message for -dev check
+ * test: use stringid.GenerateNonCryptoID() in more tests
+ * network: disallow CNI networks with user namespaces
+ * Reduce general binding binary size
+ * play kube: set entrypoint when interpreting Command
+ * Fxes /etc/hosts duplicated every time after container restarted in a pod
+ * Add 'MemUsageBytes' format option
+ * Remove the ability to use [name:tag] in podman load command
+ * More /var/run -> /run
+ * More /var/run -> /run
+ * Exorcise Driver code from libpod/define
+ * Fix problems reported by staticcheck
+ * Expose security attribute errors with their own messages
+ * oci: use /proc/self/fd/FD to open unix socket
+ * Use HTTPProxy settings from containers.conf
+ * Cirrus: Add cross-compile test for alternative arches
+ * image list: ignore bare manifest list
+ * Ensure that `podman play kube` actually reports errors
+ * Bump github.com/containers/storage from 1.24.4 to 1.24.5
+ * oci: keep LC_ env variables to conmon
+ * Better test and idomatic code.
+ * add pre checkpoint
+ * podman build --force-rm defaults to true in code
+ * Adding json formatting to `--list-tags` option in `podman search` command.
+ * Use abi PodPs implementation for libpod/pods/json endpoint
+ * Add Networks format placeholder to podman ps and pod ps
+ * Add network filter for podman ps and pod ps
+ * Improve error message when the the podman service is not enabled
+ * Restore compatible API for prune endpoints
+ * Cirrus: Skip most tests on tag-push
+ * Add mips architecture to the cross build target
+ * Fix build for mips architecture follow-up
+ * Handle podman exec capabilities correctly
+ * Containers should not get inheritable caps by default
+ * Make podman generate systemd --new flag parsing more robust
+ * Switch references of /var/run -> /run
+ * rootless: automatically split userns ranges
+ * rootless: add function to retrieve uid mappings
+ * rootless: add function to retrieve gid mappings
+ * test: Add checkpoint/restore with volumes
+ * Include named volumes in container migration
+ * Use Options as CRImportCheckpoint() argument
+ * Use Options as exportCheckpoint() argument
+ * Fix podman logs read partial log lines
+ * Revert e6fbc15f26b2a609936dfc11732037c70ee14cba
+ * Cirrus: Update Fedora & Ubuntu images
+ * Ensure that user-specified HOSTNAME is honored
+ * generate systemd: do not set `KillMode`
+ * Bump github.com/google/uuid from 1.1.3 to 1.1.4
+ * vendor containers/psgo@v1.5.2
+ * Add default sysctls for pod infra containers
+ * Ensure we do not edit container config in Exec
+ * close journald when reading
+ * libpod API: pull: fix channel race
+ * Allow image errors to bubble up from lower level functions.
+ * test: fix variable name
+ * systemd: make rundir always accessible
+ * podman-remote fix sending tar content
+ * fix: disable seccomp by default when privileged.
+ * Compat api containers/json add support for filters
+ * Bump github.com/google/uuid from 1.1.2 to 1.1.3
+ * Expose Height/Width fields to decoder
+ * Rework pruning to report reclaimed space
+ * Add support for Gentoo file to package query
+ * The slirp4netns sandbox requires pivot_root
+ * Update nix pin with `make nixpkgs`
+ * readme: Remove broken link
+ * Fix e2e test for `podman build --logfile`
+ * test: fix variable names
+ * exec: honor --privileged
+ * libpod: change function to accept ExecOptions
+ * Consolidate filter logic to pkg subdirectory
+ * sort api endpoints in documentation
+ * libpod: handle single user mapped as root
+ * Refactor kube.ToSpecGen parameters to struct
+ * re-open container log files
+ * Set NetNS mode instead of value
+ * add --cidfile to container kill
+ * Document uid/gidmap are based on subuid/gid mapping
+ * Bump github.com/containers/storage from 1.24.3 to 1.24.4
+ * Fix podman build --logfile
+ * Fix missing options in volumes display while setting uid and gid
+ * Spelling
+ * play kube: fix args/command handling
+ * Pass down EnableKeyring from containers.conf to conmon
+ * Prefer read/write images over read/only images
+ * add pod filter for ps
+ * Add Security information to podman info
+ * Add volume filters to system prune
+ * podman v3 container bindings
+ * Fix build for mips architecture
+ * Bump k8s.io/apimachinery from 0.20.0 to 0.20.1
+ * Update nix pin with `make nixpkgs`
+ * Document location of backend events file
+ * Fix support for rpmbuild < 4.12.0.
+ * system tests: set PODMAN_TIMEOUT to 120
+ * remote copy
+ * Bump github.com/containers/common from 0.31.0 to 0.31.1
+ * podman v3 pod bindings
+ * [CI:DOCS] Bump version on readme
+ * misc bindings to podman v3
+ * Docker compat API - /images/search returns wrong structure (#7857)
+ * Close the stdin/tty when using podman as a restAPI.
+ * Add support for pacman package version query
+ * Don't accidentally remove XDG_RUNTIME_DIR when resetting storage
+ * Always add the default gateway to the cni config file
+ * System tests: better diagnostics in completion test
+ * Bump github.com/opencontainers/selinux from 1.7.0 to 1.8.0
+ * podman.service should be an exec service not a notify service
+ * Fix: unpause not supported for CGv1 rootless
+ * Disable incompatible rootless + CGroupsV1 tests
+ * Disable rootless pod stats tests w/ CgroupV1
+ * Disable CGv1 pod stats on net=host post
+ * Disable pod stats tests in containerized Fedora w/ CGroupsV1
+ * Disable blkio.weight test on Ubuntu
+ * Cirrus: Add support for Ubuntu 20.x
+ * Add LogSize to container inspect
+ * Podman image bindings for 3.0
+ * contrib: drop mirror.chpc.utah.edu:443
+ * libpod, conmon: change log level for rootless
+ * Clean up temporary file.
+ * Allow users to specify TMPDIR in containers.conf
+ * system tests: the catch-up game
+ * RHEL gating tests: more journald exceptions
+ * Add volume prune --filter support
+ * shell completion for the network flag
+ * podman events allow future time for --until
+ * Sign multi-arch images
+ * add compose test descriptions
+ * test-compose: rewrite to new subdir form
+ * add compose regression to ci
+ * WIP: test docker-compose
+ * podman: drop checking valid rootless UID
+ * Cleanup CNI Networks on reboot
+ * Fix some network compat api problems
+ * Fix wrong image tag being used when creating a container from an image with multiple tags
+ * Handle --rm when starting a container
+ * Refine public key usage when remote
+ * podman logs honor stderr correctly
+ * Bindings refactor
+ * Ignore containers.conf sysctls when sharing namespaces
+ * Fix panic in libpod images exists endpoint
+ * Bump github.com/containernetworking/plugins from 0.8.7 to 0.9.0
+ * Add --filter to podman system prune
+ * Fix storage.conf to define driver in the VM
+ * Bump github.com/containers/storage from 1.24.1 to 1.24.3
+ * Properly handle --cap-add all when running with a --user flag
+ * security: honor systempaths=unconfined for ro paths
+ * Add system test for shell completion
+ * Bump github.com/onsi/gomega from 1.10.3 to 1.10.4
+ * Honor the --layers flag
+ * pkg/copy: introduce a Copier
+ * Repeat system pruning until there is nothing removed
+ * Bump k8s.io/apimachinery from 0.19.4 to 0.20.0
+ * Bump github.com/opencontainers/selinux from 1.6.0 to 1.7.0
+ * auto updates: document systemd unit and timer
+ * archive: move stat-header handling into copy package
+ * Fix spelling mistakes
+ * pkg/copy: add parsing API
+ * make podman play use ENVs from image
+ * Correct port range logic for port generation
+ * Make `podman stats` slirp check more robust
+ * Add systempaths=unconfined option
+ * Bump github.com/containers/image/v5 from 5.8.1 to 5.9.0
+ * Restore json format for fields as well as whole structs
+ * Do not pull if image domain is localhost
+ * pass full NetworkMode to ParseNetworkNamespace
+ * Fix network ls --filter invalid value flake
+ * Implement pod-network-reload
+ * generate kube on multiple containers
+ * Change name of imageVolumes in container config JSON
+ * Do not error on installing duplicate shutdown handler
+ * image sign using per user registries.d
+ * container cgroup path
+ * add comment to #8558 regression test
+ * Docker compat API - containers create ignores the name
+ * Add APIv2 test for containers-prune
+ * container create: do not clear image name
+ * Add saschagrunert and zhangguanzhang to OWNERS
+ * Bump github.com/containers/common from 0.30.0 to 0.31.0
+ * update website link for install instructions
+ * Jira RUN-1106 System handlers updates
+ * enable short-name aliasing
+ * Jira RUN-1106 Volumes handlers updates
+ * Jira RUN-1106 Network handlers updates
+ * Do not mount sysfs as rootless in more cases
+ * Add ability to set system wide options for slirp4netns
+ * Vendor in containers/common v0.30.0
+ * Clarify uid range requirements
+ * Close image rawSource when each loop ends
+ * Use PasswordCallback instead of Password for ssh
+ * More docker compat API fixes
+ * rewrite podman-cp
+ * e2e: bump pull timeout to 240 seconds
+ * add @Luap99 to OWNERS file
+ * Support Unix timestamps for `podman logs --since`
+ * Fix some nit
+ * Jira RUN-1106 Image handlers updates
+ * Jira RUN-1106 Container handlers updates
+ * Add containerenv information to /run/.containerenv
+ * Correct which network commands can be run as rootless
+ * Drop default log-level from error to warn
+ * podman, exec: move conmon to the correct cgroup
+ * Support --network=default as if it was private
+ * Change bindings to stop two API calls for ping
+ * hack/podman-socat captures the API stream
+ * BATS: add new load test
+ * Add mask and unmask option to --security-opt
+ * Use Libpod tmpdir for pause path
+ * Fix `podman images...` missing headers in table templates
+ * add commas between mount options
+ * Do not pass name argument to Load API
+ * target is not tag
+ * Fix shell completion for ps --filter ancestor
+ * Add support for network ids
+ * Validate that the bridge option is supported
+ * Add integration test for the bridge options
+ * Add podman network create option for bridge vlan
+ * Add podman network create option for bridge mtu
+ * Do not use "true" after "syslog" in exit commands
+ * Fix typo in tests
+ * Fix potential race condition in testing
+ * compat create should use bindings
+ * Add API for communicating with Docker volume plugins
+ * BATS: add ping test
+ * Document volume mounts of source directories do NOT get created
+ * Revert the custom cobra vendor
+ * Bump version in README to v2.2.0
+ * network connect disconnect on non-running containers
+ * Bump master to v3.0.0-dev
+ * Update release notes for v2.2.0
+ * Fix extra quotation mark in manpages.
+ * Fix option names --subuidname and --subgidname
+ * Do not ignore infra command from config files
+ * Revert "Allow multiple --network flags for podman run/create"
+ * Add APIv2 tests for kube generate
+ * Document docker transport is the only supported remote transport
+ * podman network label support
+ * runtime: set XDG_* env variables if missing
+ * Add support for persistent volume claims in kube files
+ * Prepare support in kube play for other volume types than hostPath
+ * Remove varlink support from Podman
+ * Fix problems with network remove
+ * Switch from pkg/secrets to pkg/subscriptions
+ * Do not validate the volume source path in specgen
+ * Add support for --platform
+ * REST API v2 - ping - fix typo in header
+ * REST API v2 - ping - remove newline from response to improve Docker compatibility
+ * squash
+ * Not use local image create/add manifest
+ * [CI:DOCS] fix misleading save/load usage
+ * [tutorials:mac-win-client] Fix command ensuring sshd is enabled
+ * Fix custom mac address with a custom cni network
+ * Bump to v2.2.0-dev
+ * Handle ps container created field as a time.Time
+ * test resource cleanup
+ * more tests
+ * not forcing unmount
+ * few more tests
+ * add test
+ * add comment
+ * fix: unmount container without force
+ * style: wsl
+ * fix lint
+ * Implement containers/{id or name}/archive
+ * Ensure that --net=host/pod/container/none warn with -p
+
- Changelog for HEAD (2020-11-24):
* Set PATH env in systemd timer.
* Docker compat API fixes
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 78611371d..a296ef4f1 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -3,6 +3,7 @@ package common
import (
"fmt"
"net"
+ "os"
"path/filepath"
"strconv"
"strings"
@@ -13,6 +14,7 @@ import (
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/pkg/specgen"
+ "github.com/pkg/errors"
)
type ContainerCLIOpts struct {
@@ -311,6 +313,15 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
netInfo.CNINetworks = []string{string(cc.HostConfig.NetworkMode)}
}
+ parsedTmp := make([]string, 0, len(cc.HostConfig.Tmpfs))
+ for path, options := range cc.HostConfig.Tmpfs {
+ finalString := path
+ if options != "" {
+ finalString += ":" + options
+ }
+ parsedTmp = append(parsedTmp, finalString)
+ }
+
// Note: several options here are marked as "don't need". this is based
// on speculation by Matt and I. We think that these come into play later
// like with start. We believe this is just a difference in podman/compat
@@ -367,7 +378,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
StorageOpt: stringMaptoArray(cc.HostConfig.StorageOpt),
Sysctl: stringMaptoArray(cc.HostConfig.Sysctls),
Systemd: "true", // podman default
- TmpFS: stringMaptoArray(cc.HostConfig.Tmpfs),
+ TmpFS: parsedTmp,
TTY: cc.Config.Tty,
User: cc.Config.User,
UserNS: string(cc.HostConfig.UsernsMode),
@@ -386,8 +397,16 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
cliOpts.Ulimit = ulimits
}
}
+ if cc.HostConfig.Resources.NanoCPUs > 0 {
+ if cliOpts.CPUPeriod != 0 || cliOpts.CPUQuota != 0 {
+ return nil, nil, errors.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
+ }
+ cliOpts.CPUPeriod = 100000
+ cliOpts.CPUQuota = cc.HostConfig.Resources.NanoCPUs / 10000
+ }
// volumes
+ volSources := make(map[string]bool)
volDestinations := make(map[string]bool)
for _, vol := range cc.HostConfig.Binds {
cliOpts.Volume = append(cliOpts.Volume, vol)
@@ -398,6 +417,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
case 1:
volDestinations[vol] = true
default:
+ volSources[splitVol[0]] = true
volDestinations[splitVol[1]] = true
}
}
@@ -412,6 +432,23 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
}
cliOpts.Volume = append(cliOpts.Volume, vol)
}
+ // Make mount points for compat volumes
+ for vol := range volSources {
+ // This might be a named volume.
+ // Assume it is if it's not an absolute path.
+ if !filepath.IsAbs(vol) {
+ continue
+ }
+ // If volume already exists, there is nothing to do
+ if _, err := os.Stat(vol); err == nil {
+ continue
+ }
+ if err := os.MkdirAll(vol, 0755); err != nil {
+ if !os.IsExist(err) {
+ return nil, nil, errors.Wrapf(err, "error making volume mountpoint for volume %s", vol)
+ }
+ }
+ }
if len(cc.HostConfig.BlkioWeightDevice) > 0 {
devices := make([]string, 0, len(cc.HostConfig.BlkioWeightDevice))
for _, d := range cc.HostConfig.BlkioWeightDevice {
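The NanoCPUs branch above translates Docker's nano-CPU resource field (1 CPU = 1e9) into Podman's CFS period/quota pair, fixing the period at 100000µs. A minimal standalone sketch of that conversion; nanoCPUsToCFS is a hypothetical helper name, and the constants match the hunk above:

```
package main

import (
	"errors"
	"fmt"
)

// nanoCPUsToCFS converts Docker-style NanoCPUs into a CFS period/quota pair.
// It refuses to combine NanoCPUs with an explicitly set period or quota,
// mirroring the conflict check in ContainerCreateToContainerCLIOpts.
func nanoCPUsToCFS(nanoCPUs, period, quota int64) (int64, int64, error) {
	if nanoCPUs <= 0 {
		return period, quota, nil
	}
	if period != 0 || quota != 0 {
		return 0, 0, errors.New("NanoCpus conflicts with CpuPeriod and CpuQuota")
	}
	// With a fixed 100000µs period, 1 CPU (1e9 NanoCPUs) maps to a quota of 100000.
	return 100000, nanoCPUs / 10000, nil
}

func main() {
	period, quota, err := nanoCPUsToCFS(2_500_000_000, 0, 0) // 2.5 CPUs
	if err != nil {
		panic(err)
	}
	fmt.Println(period, quota) // 100000 250000
}
```

For example, a compat request asking for 2.5 CPUs (2,500,000,000 NanoCPUs) becomes the equivalent of `--cpu-period 100000 --cpu-quota 250000`.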
diff --git a/cmd/podman/common/netflags.go b/cmd/podman/common/netflags.go
index bc4d54de0..4d0a554a6 100644
--- a/cmd/podman/common/netflags.go
+++ b/cmd/podman/common/netflags.go
@@ -80,7 +80,7 @@ func DefineNetFlags(cmd *cobra.Command) {
_ = cmd.RegisterFlagCompletionFunc(publishFlagName, completion.AutocompleteNone)
netFlags.Bool(
- "no-hosts", false,
+ "no-hosts", containerConfig.Containers.NoHosts,
"Do not create /etc/hosts within the container, instead use the version from the image",
)
}
diff --git a/cmd/podman/common/volumes.go b/cmd/podman/common/volumes.go
index 19a49a6f2..aff323936 100644
--- a/cmd/podman/common/volumes.go
+++ b/cmd/podman/common/volumes.go
@@ -6,23 +6,13 @@ import (
"strings"
"github.com/containers/common/pkg/parse"
+ "github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/specgen"
"github.com/containers/podman/v3/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
-const (
- // TypeBind is the type for mounting host dir
- TypeBind = "bind"
- // TypeVolume is the type for named volumes
- TypeVolume = "volume"
- // TypeTmpfs is the type for mounting tmpfs
- TypeTmpfs = "tmpfs"
- // TypeDevpts is the type for creating a devpts
- TypeDevpts = "devpts"
-)
-
var (
errDuplicateDest = errors.Errorf("duplicate mount destination")
optionArgError = errors.Errorf("must provide an argument for option")
@@ -90,7 +80,7 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
}
unifiedMounts[dest] = spec.Mount{
Destination: dest,
- Type: TypeTmpfs,
+ Type: define.TypeTmpfs,
Source: "tmpfs",
Options: options,
}
@@ -131,7 +121,7 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
// Final step: maps to arrays
finalMounts := make([]spec.Mount, 0, len(unifiedMounts))
for _, mount := range unifiedMounts {
- if mount.Type == TypeBind {
+ if mount.Type == define.TypeBind {
absSrc, err := filepath.Abs(mount.Source)
if err != nil {
return nil, nil, nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
@@ -194,7 +184,7 @@ func getMounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.N
return nil, nil, nil, err
}
switch mountType {
- case TypeBind:
+ case define.TypeBind:
mount, err := getBindMount(tokens)
if err != nil {
return nil, nil, nil, err
@@ -203,7 +193,7 @@ func getMounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.N
return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
- case TypeTmpfs:
+ case define.TypeTmpfs:
mount, err := getTmpfsMount(tokens)
if err != nil {
return nil, nil, nil, err
@@ -212,7 +202,7 @@ func getMounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.N
return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
- case TypeDevpts:
+ case define.TypeDevpts:
mount, err := getDevptsMount(tokens)
if err != nil {
return nil, nil, nil, err
@@ -250,7 +240,7 @@ func getMounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.N
// Parse a single bind mount entry from the --mount flag.
func getBindMount(args []string) (spec.Mount, error) {
newMount := spec.Mount{
- Type: TypeBind,
+ Type: define.TypeBind,
}
var setSource, setDest, setRORW, setSuid, setDev, setExec, setRelabel bool
@@ -381,8 +371,8 @@ func getBindMount(args []string) (spec.Mount, error) {
// Parse a single tmpfs mount entry from the --mount flag
func getTmpfsMount(args []string) (spec.Mount, error) {
newMount := spec.Mount{
- Type: TypeTmpfs,
- Source: TypeTmpfs,
+ Type: define.TypeTmpfs,
+ Source: define.TypeTmpfs,
}
var setDest, setRORW, setSuid, setDev, setExec, setTmpcopyup bool
@@ -460,8 +450,8 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
// Parse a single devpts mount entry from the --mount flag
func getDevptsMount(args []string) (spec.Mount, error) {
newMount := spec.Mount{
- Type: TypeDevpts,
- Source: TypeDevpts,
+ Type: define.TypeDevpts,
+ Source: define.TypeDevpts,
}
var setDest bool
@@ -630,9 +620,9 @@ func getTmpfsMounts(tmpfsFlag []string) (map[string]spec.Mount, error) {
mount := spec.Mount{
Destination: filepath.Clean(destPath),
- Type: string(TypeTmpfs),
+ Type: string(define.TypeTmpfs),
Options: options,
- Source: string(TypeTmpfs),
+ Source: string(define.TypeTmpfs),
}
m[destPath] = mount
}
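With the local mount-type constants removed, callers build mounts from the shared libpod/define constants referenced in the hunks above. A small sketch, assuming only that define.TypeTmpfs carries the same "tmpfs" value the deleted local constant did:

```
package main

import (
	"fmt"

	"github.com/containers/podman/v3/libpod/define"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Roughly what getTmpfsMount starts from before --mount options are parsed.
	m := spec.Mount{
		Type:        define.TypeTmpfs,
		Source:      define.TypeTmpfs,
		Destination: "/run/example",
		Options:     []string{"rw", "size=64m"},
	}
	fmt.Printf("%+v\n", m)
}
```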
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index 7887e9539..27aacc6e5 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -160,6 +160,25 @@ func copyFromContainer(container string, containerPath string, hostPath string)
}
}
+ // If we copy a directory via the "." notation and the host path does
+ // not exist, we need to make sure that the destination on the host
+ // gets created; otherwise the contents of the source directory will be
+ // written to the destination's parent directory.
+ //
+ // While we could cut it short on the host and do create the directory
+ // ourselves, we would run into problems trying to do that the other way
+ // around when copying into a container. Instead, to keep both
+ // implementations symmetrical, we need to massage the code a bit to
+ // let Buildah's copier package create the destination.
+ //
+ // Hence, whenever "." is the source and the destination does not exist,
+ // we copy the source's parent and let the copier package create the
+ // destination via the Rename option.
+ containerTarget := containerInfo.LinkTarget
+ if hostInfoErr != nil && containerInfo.IsDir && strings.HasSuffix(containerTarget, ".") {
+ containerTarget = filepath.Dir(containerTarget)
+ }
+
reader, writer := io.Pipe()
hostCopy := func() error {
defer reader.Close()
@@ -189,13 +208,14 @@ func copyFromContainer(container string, containerPath string, hostPath string)
}
putOptions := buildahCopiah.PutOptions{
- ChownDirs: &idPair,
- ChownFiles: &idPair,
+ ChownDirs: &idPair,
+ ChownFiles: &idPair,
+ IgnoreDevices: true,
}
- if !containerInfo.IsDir && (!hostInfo.IsDir || hostInfoErr != nil) {
+ if (!containerInfo.IsDir && !hostInfo.IsDir) || hostInfoErr != nil {
// If we're having a file-to-file copy, make sure to
// rename accordingly.
- putOptions.Rename = map[string]string{filepath.Base(containerInfo.LinkTarget): hostBaseName}
+ putOptions.Rename = map[string]string{filepath.Base(containerTarget): hostBaseName}
}
dir := hostInfo.LinkTarget
if !hostInfo.IsDir {
@@ -209,7 +229,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
containerCopy := func() error {
defer writer.Close()
- copyFunc, err := registry.ContainerEngine().ContainerCopyToArchive(registry.GetContext(), container, containerInfo.LinkTarget, writer)
+ copyFunc, err := registry.ContainerEngine().ContainerCopyToArchive(registry.GetContext(), container, containerTarget, writer)
if err != nil {
return err
}
@@ -277,6 +297,19 @@ func copyToContainer(container string, containerPath string, hostPath string) er
containerBaseName = filepath.Base(containerInfo.LinkTarget)
}
+ // If we copy a directory via the "." notation and the container path
+ // does not exist, we need to make sure that the destination on the
+ // container gets created; otherwise the contents of the source
+ // directory will be written to the destination's parent directory.
+ //
+ // Hence, whenever "." is the source and the destination does not
+ // exist, we copy the source's parent and let the copier package create
+ // the destination via the Rename option.
+ hostTarget := hostInfo.LinkTarget
+ if containerInfoErr != nil && hostInfo.IsDir && strings.HasSuffix(hostTarget, ".") {
+ hostTarget = filepath.Dir(hostTarget)
+ }
+
var stdinFile string
if isStdin {
if !containerInfo.IsDir {
@@ -317,15 +350,16 @@ func copyToContainer(container string, containerPath string, hostPath string) er
}
getOptions := buildahCopiah.GetOptions{
- // Unless the specified points to ".", we want to copy the base directory.
- KeepDirectoryNames: hostInfo.IsDir && filepath.Base(hostPath) != ".",
+ // Unless the specified path points to ".", we want to
+ // copy the base directory.
+ KeepDirectoryNames: hostInfo.IsDir && filepath.Base(hostTarget) != ".",
}
- if !hostInfo.IsDir && (!containerInfo.IsDir || containerInfoErr != nil) {
+ if (!hostInfo.IsDir && !containerInfo.IsDir) || containerInfoErr != nil {
// If we're having a file-to-file copy, make sure to
// rename accordingly.
- getOptions.Rename = map[string]string{filepath.Base(hostInfo.LinkTarget): containerBaseName}
+ getOptions.Rename = map[string]string{filepath.Base(hostTarget): containerBaseName}
}
- if err := buildahCopiah.Get("/", "", getOptions, []string{hostInfo.LinkTarget}, writer); err != nil {
+ if err := buildahCopiah.Get("/", "", getOptions, []string{hostTarget}, writer); err != nil {
return errors.Wrap(err, "error copying from host")
}
return nil
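The "." handling added above boils down to a single rule: when the source is a directory referenced with a trailing "." and the destination does not yet exist, copy the source's parent and let Buildah's copier create the destination itself via the Rename option. A standalone sketch of just that adjustment; adjustDotTarget is a hypothetical name, not a Podman function:

```
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// adjustDotTarget mirrors the logic in copyFromContainer/copyToContainer:
// if src is a directory given via the "." notation and the destination does
// not exist, copy the parent and let the copier create the destination.
func adjustDotTarget(srcTarget string, srcIsDir, destExists bool) string {
	if !destExists && srcIsDir && strings.HasSuffix(srcTarget, ".") {
		return filepath.Dir(srcTarget)
	}
	return srcTarget
}

func main() {
	fmt.Println(adjustDotTarget("/var/log/.", true, false)) // /var/log
	fmt.Println(adjustDotTarget("/var/log/.", true, true))  // /var/log/.
}
```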
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index d7507775f..af9278ce1 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -166,7 +166,11 @@ func createInit(c *cobra.Command) error {
return errors.Errorf("--cpu-quota and --cpus cannot be set together")
}
- if c.Flag("no-hosts").Changed && c.Flag("add-host").Changed {
+ noHosts, err := c.Flags().GetBool("no-hosts")
+ if err != nil {
+ return err
+ }
+ if noHosts && c.Flag("add-host").Changed {
return errors.Errorf("--no-hosts and --add-host cannot be set together")
}
cliVals.UserNS = c.Flag("userns").Value.String()
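The createInit change above switches from asking whether --no-hosts was changed on the command line to reading its effective boolean value, so a default coming from containers.conf (see the netflags.go hunk) also participates in the --add-host conflict check. A minimal pflag sketch of the difference, outside of Podman:

```
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("create", pflag.ContinueOnError)
	// A default of true stands in for containers.conf setting no_hosts = true.
	flags.Bool("no-hosts", true, "Do not create /etc/hosts within the container")
	_ = flags.Parse([]string{}) // the user did not pass --no-hosts

	changed := flags.Changed("no-hosts")
	value, _ := flags.GetBool("no-hosts")
	fmt.Println(changed, value) // false true: only GetBool sees the config-provided default
}
```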
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index d6bf761db..3b34a6bf6 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -2,6 +2,7 @@ package images
import (
"io"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -19,7 +20,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/utils"
"github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -170,6 +170,7 @@ func buildFlags(cmd *cobra.Command) {
_ = flags.MarkHidden("signature-policy")
_ = flags.MarkHidden("tls-verify")
_ = flags.MarkHidden("compress")
+ _ = flags.MarkHidden("volume")
}
}
@@ -265,6 +266,9 @@ func build(cmd *cobra.Command, args []string) error {
}
report, err := registry.ImageEngine().Build(registry.GetContext(), containerFiles, *apiBuildOpts)
+ if err != nil {
+ return err
+ }
if cmd.Flag("iidfile").Changed {
f, err := os.Create(buildOpts.Iidfile)
@@ -276,7 +280,7 @@ func build(cmd *cobra.Command, args []string) error {
}
}
- return err
+ return nil
}
// buildFlagsWrapperToOptions converts the local build flags to the build options used
@@ -295,6 +299,11 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
}
+ commonOpts, err := parse.CommonBuildOptions(c)
+ if err != nil {
+ return nil, err
+ }
+
pullPolicy := imagebuildah.PullIfMissing
if c.Flags().Changed("pull") && flags.Pull {
pullPolicy = imagebuildah.PullAlways
@@ -314,7 +323,12 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
if len(av) > 1 {
args[av[0]] = av[1]
} else {
- delete(args, av[0])
+ // check if the env is set in the local environment and use that value if it is
+ if val, present := os.LookupEnv(av[0]); present {
+ args[av[0]] = val
+ } else {
+ delete(args, av[0])
+ }
}
}
}
@@ -353,22 +367,6 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
reporter = logfile
}
- var memoryLimit, memorySwap int64
- var err error
- if c.Flags().Changed("memory") {
- memoryLimit, err = units.RAMInBytes(flags.Memory)
- if err != nil {
- return nil, err
- }
- }
-
- if c.Flags().Changed("memory-swap") {
- memorySwap, err = units.RAMInBytes(flags.MemorySwap)
- if err != nil {
- return nil, err
- }
- }
-
nsValues, networkPolicy, err := parse.NamespaceOptions(c)
if err != nil {
return nil, err
@@ -446,29 +444,15 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
opts := imagebuildah.BuildOptions{
- AddCapabilities: flags.CapAdd,
- AdditionalTags: tags,
- Annotations: flags.Annotation,
- Architecture: arch,
- Args: args,
- BlobDirectory: flags.BlobCache,
- CNIConfigDir: flags.CNIConfigDir,
- CNIPluginPath: flags.CNIPlugInPath,
- CommonBuildOpts: &buildah.CommonBuildOptions{
- AddHost: flags.AddHost,
- CPUPeriod: flags.CPUPeriod,
- CPUQuota: flags.CPUQuota,
- CPUSetCPUs: flags.CPUSetCPUs,
- CPUSetMems: flags.CPUSetMems,
- CPUShares: flags.CPUShares,
- CgroupParent: flags.CgroupParent,
- HTTPProxy: flags.HTTPProxy,
- Memory: memoryLimit,
- MemorySwap: memorySwap,
- ShmSize: flags.ShmSize,
- Ulimit: flags.Ulimit,
- Volumes: flags.Volumes,
- },
+ AddCapabilities: flags.CapAdd,
+ AdditionalTags: tags,
+ Annotations: flags.Annotation,
+ Architecture: arch,
+ Args: args,
+ BlobDirectory: flags.BlobCache,
+ CNIConfigDir: flags.CNIConfigDir,
+ CNIPluginPath: flags.CNIPlugInPath,
+ CommonBuildOpts: commonOpts,
Compression: compression,
ConfigureNetwork: networkPolicy,
ContextDirectory: contextDir,
@@ -509,6 +493,19 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
TransientMounts: flags.Volumes,
}
+ if flags.IgnoreFile != "" {
+ excludes, err := parseDockerignore(flags.IgnoreFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to obtain decrypt config")
+ }
+ opts.Excludes = excludes
+ }
+
+ if c.Flag("timestamp").Changed {
+ timestamp := time.Unix(flags.Timestamp, 0).UTC()
+ opts.Timestamp = &timestamp
+ }
+
return &entities.BuildOptions{BuildOptions: opts}, nil
}
@@ -526,3 +523,18 @@ func getDecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error)
return decConfig, nil
}
+
+func parseDockerignore(ignoreFile string) ([]string, error) {
+ excludes := []string{}
+ ignore, err := ioutil.ReadFile(ignoreFile)
+ if err != nil {
+ return excludes, err
+ }
+ for _, e := range strings.Split(string(ignore), "\n") {
+ if len(e) == 0 || e[0] == '#' {
+ continue
+ }
+ excludes = append(excludes, e)
+ }
+ return excludes, nil
+}
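The --build-arg change above means a bare `--build-arg NAME` (no `=value`) now falls back to the caller's environment instead of always being dropped. A simplified standalone sketch of that resolution step; resolveBuildArgs is a hypothetical helper, and the real loop additionally deletes pre-populated args that stay unset:

```
package main

import (
	"fmt"
	"os"
	"strings"
)

// resolveBuildArgs: "K=V" sets the arg directly; a bare "K" is taken from the
// environment if present and skipped otherwise.
func resolveBuildArgs(flagValues []string) map[string]string {
	args := map[string]string{}
	for _, arg := range flagValues {
		av := strings.SplitN(arg, "=", 2)
		if len(av) > 1 {
			args[av[0]] = av[1]
			continue
		}
		if val, present := os.LookupEnv(av[0]); present {
			args[av[0]] = val
		}
	}
	return args
}

func main() {
	os.Setenv("HTTP_PROXY", "http://proxy.example:3128")
	fmt.Println(resolveBuildArgs([]string{"FOO=bar", "HTTP_PROXY", "MISSING"}))
	// map[FOO:bar HTTP_PROXY:http://proxy.example:3128]
}
```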
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 874573bb9..7722e35dd 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -17,9 +17,7 @@ import (
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/parallel"
"github.com/containers/podman/v3/pkg/rootless"
- "github.com/containers/podman/v3/pkg/tracing"
"github.com/containers/podman/v3/version"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -103,7 +101,6 @@ func Execute() {
}
func persistentPreRunE(cmd *cobra.Command, args []string) error {
- // TODO: Remove trace statement in podman V2.1
logrus.Debugf("Called %s.PersistentPreRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
// Help, completion and commands with subcommands are special cases, no need for more setup
@@ -194,16 +191,6 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
}
}
- if cmd.Flag("trace").Changed {
- tracer, closer := tracing.Init("podman")
- opentracing.SetGlobalTracer(tracer)
- cfg.SpanCloser = closer
-
- cfg.Span = tracer.StartSpan("before-context")
- cfg.SpanCtx = opentracing.ContextWithSpan(registry.Context(), cfg.Span)
- opentracing.StartSpanFromContext(cfg.SpanCtx, cmd.Name())
- }
-
if cfg.MaxWorks <= 0 {
return errors.Errorf("maximum workers must be set to a positive number (got %d)", cfg.MaxWorks)
}
@@ -226,22 +213,16 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
}
func persistentPostRunE(cmd *cobra.Command, args []string) error {
- // TODO: Remove trace statement in podman V2.1
logrus.Debugf("Called %s.PersistentPostRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
if !requireCleanup {
return nil
}
- cfg := registry.PodmanConfig()
if !registry.IsRemote() {
if cmd.Flag("cpu-profile").Changed {
pprof.StopCPUProfile()
}
- if cmd.Flag("trace").Changed {
- cfg.Span.Finish()
- cfg.SpanCloser.Close()
- }
}
registry.ImageEngine().Shutdown(registry.Context())
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
index ccbdb63b6..fca9aff93 100755
--- a/contrib/cirrus/runner.sh
+++ b/contrib/cirrus/runner.sh
@@ -70,6 +70,10 @@ function _run_sys() {
dotest system
}
+function _run_upgrade_test() {
+ bats test/upgrade |& logformatter
+}
+
function _run_bindings() {
# shellcheck disable=SC2155
export PATH=$PATH:$GOSRC/hack
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 4c95d0254..64ea3b7b4 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -200,6 +200,7 @@ case "$TEST_FLAVOR" in
compose) ;&
int) ;&
sys) ;&
+ upgrade_test) ;&
bindings) ;&
endpoint)
# Use existing host bits when testing is to happen inside a container
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 662234f71..5e820719f 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -42,7 +42,7 @@ Epoch: 99
%else
Epoch: 0
%endif
-Version: 3.0.0
+Version: 3.1.0
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
diff --git a/docs/source/markdown/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md
index 06d3c205d..c4a5eec50 100644
--- a/docs/source/markdown/podman-attach.1.md
+++ b/docs/source/markdown/podman-attach.1.md
@@ -25,9 +25,7 @@ Specify the key sequence for detaching a container. Format is a single character
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--no-stdin**
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index e05678e2c..24093d414 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -86,7 +86,7 @@ resulting image's configuration.
#### **--cache-from**
Images to utilize as potential cache sources. Podman does not currently support
-caching so this is a NOOP.
+caching so this is a NOOP. (This option is not available with the remote Podman client)
#### **--cap-add**=*CAP\_xxx*
@@ -111,8 +111,7 @@ given.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for
-remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--cgroup-parent**=*path*
@@ -124,7 +123,7 @@ of the init process. Cgroups will be created if they do not already exist.
This option is added to be aligned with other containers CLIs.
Podman doesn't communicate with a daemon or a remote server.
-Thus, compressing the data before sending it is irrelevant to Podman.
+Thus, compressing the data before sending it is irrelevant to Podman. (This option is not available with the remote Podman client)
#### **--cni-config-dir**=*directory*
@@ -255,11 +254,11 @@ specifying **--disable-compression=false**.
This is a Docker specific option to disable image verification to a Docker
registry and is not supported by Podman. This flag is a NOOP and provided
-solely for scripting compatibility.
+solely for scripting compatibility. (This option is not available with the remote Podman client)
#### **--dns**=*dns*
-Set custom DNS servers
+Set custom DNS servers to be used during the build.
This option can be used to override the DNS configuration passed to the
container. Typically this is necessary when the host DNS configuration is
@@ -272,11 +271,11 @@ image will be used without changes.
#### **--dns-option**=*option*
-Set custom DNS options
+Set custom DNS options to be used during the build.
#### **--dns-search**=*domain*
-Set custom DNS search domains
+Set custom DNS search domains to be used during the build.
#### **--file**, **-f**=*Containerfile*
@@ -521,7 +520,7 @@ size entirely, the system uses `64m`.
#### **--sign-by**=*fingerprint*
-Sign the image using a GPG key with the specified FINGERPRINT.
+Sign the image using a GPG key with the specified FINGERPRINT. (This option is not available with the remote Podman client)
#### **--squash**
@@ -566,7 +565,7 @@ timestamp.
#### **--tls-verify**=*true|false*
Require HTTPS and verify certificates when talking to container registries
-(defaults to true).
+(defaults to true). (This option is not available with the remote Podman client)
#### **--ulimit**=*type*=*soft-limit*[:*hard-limit*]
@@ -679,7 +678,9 @@ Set the architecture variant of the image to be pulled.
Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Podman
bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Podman
- container. The `OPTIONS` are a comma delimited list and can be: <sup>[[1]](#Footnote1)</sup>
+ container. (This option is not available with the remote Podman client)
+
+ The `OPTIONS` are a comma delimited list and can be: <sup>[[1]](#Footnote1)</sup>
* [rw|ro]
* [z|Z|O]
@@ -821,9 +822,13 @@ $ podman build --no-cache --rm=false -t imageName .
### Building an multi-architecture image using a --manifest option (Requires emulation software)
-podman build --arch arm --manifest myimage /tmp/mysrc
-podman build --arch amd64 --manifest myimage /tmp/mysrc
-podman build --arch s390x --manifest myimage /tmp/mysrc
+```
+$ podman build --arch arm --manifest myimage /tmp/mysrc
+
+$ podman build --arch amd64 --manifest myimage /tmp/mysrc
+
+$ podman build --arch s390x --manifest myimage /tmp/mysrc
+```
### Building an image using a URL, Git repo, or archive
diff --git a/docs/source/markdown/podman-container-checkpoint.1.md b/docs/source/markdown/podman-container-checkpoint.1.md
index ea05979cd..46b6cb646 100644
--- a/docs/source/markdown/podman-container-checkpoint.1.md
+++ b/docs/source/markdown/podman-container-checkpoint.1.md
@@ -23,9 +23,7 @@ Checkpoint all running containers.
#### **--latest**, **-l**
-Instead of providing the container name or ID, checkpoint the last created container.
-
-The latest option is not supported on the remote client.
+Instead of providing the container name or ID, checkpoint the last created container. (This option is not available with the remote Podman client)
#### **--leave-running**, **-R**
diff --git a/docs/source/markdown/podman-container-cleanup.1.md b/docs/source/markdown/podman-container-cleanup.1.md
index eabb462e8..8fd267b47 100644
--- a/docs/source/markdown/podman-container-cleanup.1.md
+++ b/docs/source/markdown/podman-container-cleanup.1.md
@@ -25,9 +25,7 @@ Conflicts with **--rmi** as the container is not being cleaned up so the image c
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--rm**
diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index 192b8765b..ef8722279 100644
--- a/docs/source/markdown/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -30,9 +30,7 @@ Restore all checkpointed containers.
#### **--latest**, **-l**
-Instead of providing the container name or ID, restore the last created container.
-
-The latest option is not supported on the remote client.
+Instead of providing the container name or ID, restore the last created container. (This option is not available with the remote Podman client)
#### **--tcp-established**
diff --git a/docs/source/markdown/podman-container-runlabel.1.md b/docs/source/markdown/podman-container-runlabel.1.md
index 54d675705..36bcbab3d 100644
--- a/docs/source/markdown/podman-container-runlabel.1.md
+++ b/docs/source/markdown/podman-container-runlabel.1.md
@@ -57,7 +57,7 @@ The runlabel command will not execute if --display is specified.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--creds**=*[username[:password]]*
diff --git a/docs/source/markdown/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md
index 56511c244..bafbbdf3b 100644
--- a/docs/source/markdown/podman-cp.1.md
+++ b/docs/source/markdown/podman-cp.1.md
@@ -57,6 +57,8 @@ If you use a : in a local machine path, you must be explicit with a relative or
Using `-` as the *src_path* streams the contents of STDIN as a tar archive. The command extracts the content of the tar to the *DEST_PATH* in the container. In this case, *dest_path* must specify a directory. Using `-` as the *dest_path* streams the contents of the resource (can be a directory) as a tar archive to STDOUT.
+Note that `podman cp` ignores permission errors when copying from a running rootless container. The TTY devices inside a rootless container are owned by the host's root user and hence cannot be read inside the container's user namespace.
+
## OPTIONS
## ALTERNATIVES
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 30cadf703..d6b90e17a 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -343,7 +343,7 @@ See [**Environment**](#environment) note below for precedence and examples.
#### **--env-host**=*true|false*
-Use host environment inside of the container. See **Environment** note below for precedence. (Not available for remote commands)
+Use host environment inside of the container. See **Environment** note below for precedence. (This option is not available with the remote Podman client)
#### **--env-file**=*file*
@@ -412,7 +412,7 @@ the container should not use any proxy. Proxy environment variables specified
for the container in any other way will override the values that would have
been passed through from the host. (Other ways to specify the proxy for the
container include passing the values with the `--env` flag, or hard coding the
-proxy environment at container build time.) (Not available for remote commands)
+proxy environment at container build time.) (This option is not available with the remote Podman client)
For example, to disable passing these environment variables from host to
container:
@@ -859,7 +859,7 @@ Security Options
- `unmask=ALL or /path/1:/path/2` : Paths to unmask separated by a colon. If set to **ALL**, it will
unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger**.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
- `proc-opts=OPTIONS` : Comma separated list of options to use for the /proc mount. More details for the
possible mount options are specified at **proc(5)** man page.
diff --git a/docs/source/markdown/podman-diff.1.md b/docs/source/markdown/podman-diff.1.md
index 227da4864..dbab2d4db 100644
--- a/docs/source/markdown/podman-diff.1.md
+++ b/docs/source/markdown/podman-diff.1.md
@@ -20,9 +20,7 @@ Alter the output into a different format. The only valid format for diff is `js
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index 17d620cf8..524ee50f0 100644
--- a/docs/source/markdown/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -37,9 +37,7 @@ When set to true, keep stdin open even if not attached. The default is *false*.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--preserve-fds**=*N*
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index f75f77d79..00b13669a 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -1,7 +1,7 @@
% podman-generate-systemd(1)
## NAME
-podman\-generate\-systemd - Generate systemd unit file(s) for a container or pod. Not supported for the remote client
+podman\-generate\-systemd - Generate systemd unit file(s) for a container or pod
## SYNOPSIS
**podman generate systemd** [*options*] *container|pod*
diff --git a/docs/source/markdown/podman-generate.1.md b/docs/source/markdown/podman-generate.1.md
index da5d92ea4..82c67fdb1 100644
--- a/docs/source/markdown/podman-generate.1.md
+++ b/docs/source/markdown/podman-generate.1.md
@@ -14,7 +14,7 @@ The generate command will create structured output (like YAML) based on a contai
| Command | Man Page | Description |
|---------|------------------------------------------------------------|-------------------------------------------------------------------------------------|
| kube | [podman-generate-kube(1)](podman-generate-kube.1.md) | Generate Kubernetes YAML based on a pod or container. |
-| systemd | [podman-generate-systemd(1)](podman-generate-systemd.1.md) | Generate systemd unit file(s) for a container or pod. Not supported for the remote client. |
+| systemd | [podman-generate-systemd(1)](podman-generate-systemd.1.md) | Generate systemd unit file(s) for a container or pod. |
## SEE ALSO
diff --git a/docs/source/markdown/podman-image-sign.1.md b/docs/source/markdown/podman-image-sign.1.md
index 3e52bde30..b9addc062 100644
--- a/docs/source/markdown/podman-image-sign.1.md
+++ b/docs/source/markdown/podman-image-sign.1.md
@@ -26,7 +26,7 @@ Sign all the manifests of the multi-architecture image (default false).
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--directory**, **-d**=*dir*
@@ -37,7 +37,7 @@ Store the signatures in the specified directory. Default: /var/lib/containers/s
Override the default identity of the signature.
## EXAMPLES
-Sign the busybox image with the identify of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/.
+Sign the busybox image with the identity of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/.
sudo podman image sign --sign-by foo@bar.com --directory /tmp/signatures docker://privateregistry.example.com/foobar
diff --git a/docs/source/markdown/podman-image-trust.1.md b/docs/source/markdown/podman-image-trust.1.md
index 002f42bff..6e2c2287a 100644
--- a/docs/source/markdown/podman-image-trust.1.md
+++ b/docs/source/markdown/podman-image-trust.1.md
@@ -8,7 +8,7 @@ podman\-image\-trust - Manage container registry image trust policy
**podman image trust** set|show [*options*] *registry[/repository]*
## DESCRIPTION
-Manages which registries you trust as a source of container images based on its location. (Not available for remote commands)
+Manages which registries you trust as a source of container images based on its location. (This option is not available with the remote Podman client)
The location is determined
by the transport and the registry host of the image. Using this container image `docker://docker.io/library/busybox`
diff --git a/docs/source/markdown/podman-init.1.md b/docs/source/markdown/podman-init.1.md
index e37902e13..5f3636686 100644
--- a/docs/source/markdown/podman-init.1.md
+++ b/docs/source/markdown/podman-init.1.md
@@ -25,9 +25,7 @@ Initialize all containers. Containers that have already initialized (including c
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-inspect.1.md b/docs/source/markdown/podman-inspect.1.md
index d0e2fbc99..bfd0cea3d 100644
--- a/docs/source/markdown/podman-inspect.1.md
+++ b/docs/source/markdown/podman-inspect.1.md
@@ -39,9 +39,7 @@ The keys of the returned JSON can be used as the values for the --format flag (s
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
-This option can be used to inspect the latest pod created when used with --type pod
-
-The latest option is not supported on the remote client or when invoked as *podman image inspect*.
+This option can be used to inspect the latest pod created when used with --type pod. (This option is not available with the remote Podman client or when invoked as *podman image inspect*.)
#### **--size**, **-s**
diff --git a/docs/source/markdown/podman-kill.1.md b/docs/source/markdown/podman-kill.1.md
index 96c01ac09..e5f1d4bbe 100644
--- a/docs/source/markdown/podman-kill.1.md
+++ b/docs/source/markdown/podman-kill.1.md
@@ -23,9 +23,7 @@ Read container ID from the specified file and remove the container. Can be spec
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--signal**, **-s**
diff --git a/docs/source/markdown/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index 89ef289e3..274869042 100644
--- a/docs/source/markdown/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
@@ -54,7 +54,7 @@ Return the logged-in user for the registry. Return error if no login is found.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--tls-verify**=*true|false*
diff --git a/docs/source/markdown/podman-logs.1.md b/docs/source/markdown/podman-logs.1.md
index 772668c8b..8f1301102 100644
--- a/docs/source/markdown/podman-logs.1.md
+++ b/docs/source/markdown/podman-logs.1.md
@@ -26,9 +26,7 @@ file will be removed before `podman logs` reads the final content.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--names**, **-n**
diff --git a/docs/source/markdown/podman-manifest-add.1.md b/docs/source/markdown/podman-manifest-add.1.md
index 6e6409765..f2d188766 100644
--- a/docs/source/markdown/podman-manifest-add.1.md
+++ b/docs/source/markdown/podman-manifest-add.1.md
@@ -44,7 +44,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--creds**=*creds*
diff --git a/docs/source/markdown/podman-manifest-push.1.md b/docs/source/markdown/podman-manifest-push.1.md
index 2beb65517..a8169cb56 100644
--- a/docs/source/markdown/podman-manifest-push.1.md
+++ b/docs/source/markdown/podman-manifest-push.1.md
@@ -30,7 +30,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--creds**=*creds*
diff --git a/docs/source/markdown/podman-mount.1.md b/docs/source/markdown/podman-mount.1.md
index 3e7aeaa32..1b1b09120 100644
--- a/docs/source/markdown/podman-mount.1.md
+++ b/docs/source/markdown/podman-mount.1.md
@@ -38,9 +38,7 @@ Print the mounted containers in specified format (json).
Instead of providing the container name or ID, use the last created container.
If you use methods other than Podman to run containers such as CRI-O, the last
-started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--notruncate**
diff --git a/docs/source/markdown/podman-network-reload.1.md b/docs/source/markdown/podman-network-reload.1.md
index dd8047297..011640c86 100644
--- a/docs/source/markdown/podman-network-reload.1.md
+++ b/docs/source/markdown/podman-network-reload.1.md
@@ -23,9 +23,7 @@ Reload network configuration of all containers.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index 3795e954c..0a34a622f 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -28,7 +28,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--configmap**=*path*
@@ -56,7 +56,7 @@ Suppress output information when pulling images
#### **--seccomp-profile-root**=*path*
-Directory path for seccomp profiles (default: "/var/lib/kubelet/seccomp"). (Not available for remote commands)
+Directory path for seccomp profiles (default: "/var/lib/kubelet/seccomp"). (This option is not available with the remote Podman client)
#### **--start**=*true|false*
diff --git a/docs/source/markdown/podman-pod-inspect.1.md b/docs/source/markdown/podman-pod-inspect.1.md
index 99cac6f9c..1f4e6cb06 100644
--- a/docs/source/markdown/podman-pod-inspect.1.md
+++ b/docs/source/markdown/podman-pod-inspect.1.md
@@ -14,9 +14,7 @@ that belong to the pod.
#### **--latest**, **-l**
Instead of providing the pod name or ID, use the last created pod. If you use methods other than Podman
-to run pods such as CRI-O, the last started pod could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run pods such as CRI-O, the last started pod could be from either of those methods. (This option is not available with the remote Podman client)
#### **--format**=*format*, **-f**
diff --git a/docs/source/markdown/podman-pod-kill.1.md b/docs/source/markdown/podman-pod-kill.1.md
index d791b7cbd..d4eef2307 100644
--- a/docs/source/markdown/podman-pod-kill.1.md
+++ b/docs/source/markdown/podman-pod-kill.1.md
@@ -17,9 +17,7 @@ Sends signal to all containers associated with a pod.
#### **--latest**, **-l**
Instead of providing the pod name or ID, use the last created pod. If you use methods other than Podman
-to run pods such as CRI-O, the last started pod could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run pods such as CRI-O, the last started pod could be from either of those methods. (This option is not available with the remote Podman client)
#### **--signal**, **-s**
diff --git a/docs/source/markdown/podman-pod-pause.1.md b/docs/source/markdown/podman-pod-pause.1.md
index fc1727737..2c849a932 100644
--- a/docs/source/markdown/podman-pod-pause.1.md
+++ b/docs/source/markdown/podman-pod-pause.1.md
@@ -17,9 +17,7 @@ Pause all pods.
#### **--latest**, **-l**
-Instead of providing the pod name or ID, pause the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, pause the last created pod. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-pod-ps.1.md b/docs/source/markdown/podman-pod-ps.1.md
index ab250e1ff..7c1c4a0b4 100644
--- a/docs/source/markdown/podman-pod-ps.1.md
+++ b/docs/source/markdown/podman-pod-ps.1.md
@@ -40,9 +40,7 @@ Includes the container statuses in the container info field
#### **--latest**, **-l**
-Show the latest pod created (all states)
-
-The latest option is not supported on the remote client.
+Show the latest pod created (all states) (This option is not available with the remote Podman client)
#### **--no-trunc**
diff --git a/docs/source/markdown/podman-pod-restart.1.md b/docs/source/markdown/podman-pod-restart.1.md
index 19c46b9de..65d05f977 100644
--- a/docs/source/markdown/podman-pod-restart.1.md
+++ b/docs/source/markdown/podman-pod-restart.1.md
@@ -20,9 +20,7 @@ Restarts all pods
#### **--latest**, **-l**
-Instead of providing the pod name or ID, restart the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, restart the last created pod. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index 4ca113cf7..d185385b6 100644
--- a/docs/source/markdown/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
@@ -23,9 +23,7 @@ ExecStop directive of a systemd service referencing that pod.
#### **--latest**, **-l**
-Instead of providing the pod name or ID, remove the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, remove the last created pod. (This option is not available with the remote Podman client)
#### **--force**, **-f**
diff --git a/docs/source/markdown/podman-pod-start.1.md b/docs/source/markdown/podman-pod-start.1.md
index 38117ebe8..40a71d879 100644
--- a/docs/source/markdown/podman-pod-start.1.md
+++ b/docs/source/markdown/podman-pod-start.1.md
@@ -18,9 +18,7 @@ Starts all pods
#### **--latest**, **-l**
-Instead of providing the pod name or ID, start the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, start the last created pod. (This option is not available with the remote Podman client)
#### **--pod-id-file**
diff --git a/docs/source/markdown/podman-pod-stats.1.md b/docs/source/markdown/podman-pod-stats.1.md
index 4ef15fc20..3158ffc1c 100644
--- a/docs/source/markdown/podman-pod-stats.1.md
+++ b/docs/source/markdown/podman-pod-stats.1.md
@@ -17,9 +17,7 @@ Show all containers. Only running containers are shown by default
#### **--latest**, **-l**
-Instead of providing the pod name or ID, use the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, use the last created pod. (This option is not available with the remote Podman client)
#### **--no-reset**
diff --git a/docs/source/markdown/podman-pod-stop.1.md b/docs/source/markdown/podman-pod-stop.1.md
index df1b36fc4..77f6af433 100644
--- a/docs/source/markdown/podman-pod-stop.1.md
+++ b/docs/source/markdown/podman-pod-stop.1.md
@@ -23,9 +23,7 @@ ExecStop directive of a systemd service referencing that pod.
#### **--latest**, **-l**
-Instead of providing the pod name or ID, stop the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, stop the last created pod. (This option is not available with the remote Podman client)
#### **--time**, **-t**=*time*
diff --git a/docs/source/markdown/podman-pod-top.1.md b/docs/source/markdown/podman-pod-top.1.md
index 80334ebd0..d374a0dca 100644
--- a/docs/source/markdown/podman-pod-top.1.md
+++ b/docs/source/markdown/podman-pod-top.1.md
@@ -17,9 +17,7 @@ Display the running processes of containers in a pod. The *format-descriptors* a
#### **--latest**, **-l**
-Instead of providing the pod name or ID, use the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, use the last created pod. (This option is not available with the remote Podman client)
## FORMAT DESCRIPTORS
diff --git a/docs/source/markdown/podman-pod-unpause.1.md b/docs/source/markdown/podman-pod-unpause.1.md
index 0b3c2d384..98022a50b 100644
--- a/docs/source/markdown/podman-pod-unpause.1.md
+++ b/docs/source/markdown/podman-pod-unpause.1.md
@@ -17,9 +17,7 @@ Unpause all pods.
#### **--latest**, **-l**
-Instead of providing the pod name or ID, unpause the last created pod.
-
-The latest option is not supported on the remote client.
+Instead of providing the pod name or ID, unpause the last created pod. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-port.1.md b/docs/source/markdown/podman-port.1.md
index 9d56d5e8c..7a84d181a 100644
--- a/docs/source/markdown/podman-port.1.md
+++ b/docs/source/markdown/podman-port.1.md
@@ -21,9 +21,7 @@ or private ports/protocols as filters.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index bb8001ad9..75eede527 100644
--- a/docs/source/markdown/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
@@ -94,9 +94,7 @@ Print the n last created containers (all states)
#### **--latest**, **-l**
-Show the latest container created (all states)
-
-The latest option is not supported on the remote client.
+Show the latest container created (all states) (This option is not available with the remote Podman client)
#### **--namespace**, **--ns**
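
As a rough illustration of the `--latest` behavior described above (the container name and image below are arbitrary examples):

```
# create a container, then refer to it without typing its name or ID
$ podman run -d --name demo alpine sleep 100
$ podman ps --latest
$ podman rm -f --latest
```
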
diff --git a/docs/source/markdown/podman-pull.1.md b/docs/source/markdown/podman-pull.1.md
index af91a59e9..02ba736f2 100644
--- a/docs/source/markdown/podman-pull.1.md
+++ b/docs/source/markdown/podman-pull.1.md
@@ -85,7 +85,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--creds**=*[username[:password]]*
diff --git a/docs/source/markdown/podman-push.1.md b/docs/source/markdown/podman-push.1.md
index 3ed5f60c0..f42ee1020 100644
--- a/docs/source/markdown/podman-push.1.md
+++ b/docs/source/markdown/podman-push.1.md
@@ -71,7 +71,7 @@ value can be entered. The password is entered without echo.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
#### **--compress**
@@ -80,7 +80,7 @@ Note: This flag can only be set when using the **dir** transport
#### **--digestfile** *Digestfile*
-After copying the image, write the digest of the resulting image to the file. (Not available for remote commands)
+After copying the image, write the digest of the resulting image to the file. (This option is not available with the remote Podman client)
#### **--disable-content-trust**
@@ -98,11 +98,11 @@ When writing the output image, suppress progress output
#### **--remove-signatures**
-Discard any pre-existing signatures in the image. (Not available for remote commands)
+Discard any pre-existing signatures in the image. (This option is not available with the remote Podman client)
#### **--sign-by**=*key*
-Add a signature at the destination using the specified key. (Not available for remote commands)
+Add a signature at the destination using the specified key. (This option is not available with the remote Podman client)
#### **--tls-verify**=*true|false*
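
A sketch of how the local-only push flags documented above combine in practice; the registry, image name, and signing identity are placeholders:

```
# push from the local (non-remote) client, recording the digest and signing the image
$ podman push \
    --cert-dir /etc/containers/certs.d \
    --digestfile /tmp/myimage.digest \
    --sign-by user@example.com \
    registry.example.com/myimage:latest
```
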
diff --git a/docs/source/markdown/podman-restart.1.md b/docs/source/markdown/podman-restart.1.md
index acca0ccfe..2b37e2ea4 100644
--- a/docs/source/markdown/podman-restart.1.md
+++ b/docs/source/markdown/podman-restart.1.md
@@ -19,9 +19,7 @@ Restart all containers regardless of their current state.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--running**
Restart all containers that are already in the *running* state.
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index cba9a0b49..0abf2768c 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -39,9 +39,7 @@ during the ExecStop directive of a systemd service referencing that container.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--volumes**, **-v**
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index a633df94e..78b036cee 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -380,7 +380,7 @@ See [**Environment**](#environment) note below for precedence and examples.
#### **--env-host**=**true**|**false**
-Use host environment inside of the container. See **Environment** note below for precedence. (Not available for remote commands)
+Use host environment inside of the container. See **Environment** note below for precedence. (This option is not available with the remote Podman client)
#### **--env-file**=*file*
@@ -456,7 +456,7 @@ the container should not use any proxy. Proxy environment variables specified
for the container in any other way will override the values that would have
been passed through from the host. (Other ways to specify the proxy for the
container include passing the values with the **--env** flag, or hard coding the
-proxy environment at container build time.) (Not available for remote commands)
+proxy environment at container build time.) (This option is not available with the remote Podman client)
Defaults to **true**.
@@ -908,7 +908,7 @@ Security Options
for the possible mount options are specified at **proc(5)** man page.
- **unmask**=_ALL_ or _/path/1:/path/2_: Paths to unmask separated by a colon. If set to **ALL**, it will
unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.**. The default paths that are read only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux**. The default paths that are read only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, and **/sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting **label=false** in the **containers.conf**(5) file.
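
A minimal sketch of the local-only environment flags and the `unmask` security option discussed above (the image and proxy value are placeholders):

```
# pass the host environment into the container, but skip the host proxy variables
$ http_proxy=http://proxy.example.com:3128 podman run --rm --env-host --http-proxy=false alpine env
# unmask a path that is masked by default
$ podman run --rm --security-opt unmask=/proc/acpi alpine ls /proc/acpi
```
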
diff --git a/docs/source/markdown/podman-start.1.md b/docs/source/markdown/podman-start.1.md
index 4f8aa2b18..600d3859a 100644
--- a/docs/source/markdown/podman-start.1.md
+++ b/docs/source/markdown/podman-start.1.md
@@ -32,9 +32,7 @@ Attach container's STDIN. The default is false.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--sig-proxy**=*true|false*
diff --git a/docs/source/markdown/podman-stats.1.md b/docs/source/markdown/podman-stats.1.md
index 722027aae..300106796 100644
--- a/docs/source/markdown/podman-stats.1.md
+++ b/docs/source/markdown/podman-stats.1.md
@@ -27,9 +27,7 @@ Show all containers. Only running containers are shown by default
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--no-reset**
diff --git a/docs/source/markdown/podman-stop.1.md b/docs/source/markdown/podman-stop.1.md
index 83570f50d..a1e9675c1 100644
--- a/docs/source/markdown/podman-stop.1.md
+++ b/docs/source/markdown/podman-stop.1.md
@@ -34,9 +34,7 @@ during the ExecStop directive of a systemd service referencing that container.
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
#### **--time**, **-t**=*time*
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 70764823c..93f18adf1 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -13,6 +13,10 @@ If no endpoint is provided, defaults will be used. The default endpoint for a r
service is *unix:/run/podman/podman.sock* and rootless is *unix:/$XDG_RUNTIME_DIR/podman/podman.sock* (for
example *unix:/run/user/1000/podman/podman.sock*)
+To access the API service inside a container:
+- mount the socket as a volume
+- run the container with `--security-opt label=disable`
+
The REST API provided by **podman system service** is split into two parts: a compatibility layer offering support for the Docker v1.40 API, and a Podman-native Libpod layer.
Documentation for the latter is available at *https://docs.podman.io/en/latest/_static/api.html*.
Both APIs are versioned, but the server will not reject requests with an unsupported version set.
@@ -34,7 +38,7 @@ Print usage statement.
Run an API listening for 5 seconds using the default socket.
```
-podman system service --timeout 5000
+podman system service --time 5
```
## SEE ALSO
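
An end-to-end sketch of the service usage described above, assuming a rootless session; the socket path, the `http://d` placeholder host, and the image are illustrative:

```
# run the API service for 60 seconds on the default rootless socket
$ podman system service --time 60 &
# ping the Docker-compatible (v1.40) endpoint over the unix socket
$ curl --unix-socket $XDG_RUNTIME_DIR/podman/podman.sock http://d/v1.40/_ping
# hand the socket to a container: mount it as a volume and disable labeling
$ podman run --rm --security-opt label=disable \
    -v $XDG_RUNTIME_DIR/podman/podman.sock:/run/podman/podman.sock \
    alpine ls -l /run/podman/podman.sock
```
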
diff --git a/docs/source/markdown/podman-top.1.md b/docs/source/markdown/podman-top.1.md
index cfb89567c..09e8f7423 100644
--- a/docs/source/markdown/podman-top.1.md
+++ b/docs/source/markdown/podman-top.1.md
@@ -20,9 +20,7 @@ Print usage statement
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
## FORMAT DESCRIPTORS
diff --git a/docs/source/markdown/podman-unmount.1.md b/docs/source/markdown/podman-unmount.1.md
index 97780111a..2832e5fdb 100644
--- a/docs/source/markdown/podman-unmount.1.md
+++ b/docs/source/markdown/podman-unmount.1.md
@@ -43,9 +43,7 @@ as the mount point could be removed without their knowledge.
Instead of providing the container name or ID, use the last created container.
If you use methods other than Podman to run containers such as CRI-O, the last
-started container could be from either of those methods.
-
-The latest option is not supported on the remote client.
+started container could be from either of those methods. (This option is not available with the remote Podman client)
## EXAMPLE
diff --git a/docs/source/markdown/podman-wait.1.md b/docs/source/markdown/podman-wait.1.md
index 07e42ca6e..fac017fe6 100644
--- a/docs/source/markdown/podman-wait.1.md
+++ b/docs/source/markdown/podman-wait.1.md
@@ -29,9 +29,8 @@ Condition to wait on (default "stopped")
#### **--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods.
+to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
-The latest option is not supported on the remote client.
## EXAMPLES
diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md
index 6f9e705c2..141d231f3 100644
--- a/docs/source/markdown/podman.1.md
+++ b/docs/source/markdown/podman.1.md
@@ -279,6 +279,8 @@ Distributions ship the `/usr/share/containers/containers.conf` file with their d
Podman uses builtin defaults if no containers.conf file is found.
+If the **CONTAINERS_CONF** environment variable is set, then its value is used for the containers.conf file rather than the default.
+
**mounts.conf** (`/usr/share/containers/mounts.conf`)
The mounts.conf file specifies volume mount directories that are automatically mounted inside containers when executing the `podman run` or `podman start` commands. Administrators can override the defaults file by creating `/etc/containers/mounts.conf`.
@@ -295,6 +297,8 @@ When Podman runs in rootless mode, the file `$HOME/.config/containers/mounts.con
Non root users of Podman can create the `$HOME/.config/containers/registries.conf` file to be used instead of the system defaults.
+ If the **CONTAINERS_REGISTRIES_CONF** environment variable is set, then its value is used for the registries.conf file rather than the default.
+
**storage.conf** (`/etc/containers/storage.conf`, `$HOME/.config/containers/storage.conf`)
storage.conf is the storage configuration file for all tools using containers/storage
@@ -303,8 +307,10 @@ When Podman runs in rootless mode, the file `$HOME/.config/containers/mounts.con
When Podman runs in rootless mode, the file `$HOME/.config/containers/storage.conf` is used instead of the system defaults.
+ If the **CONTAINERS_STORAGE_CONF** environment variable is set, then its value is used for the storage.conf file rather than the default.
+
## Rootless mode
-Podman can also be used as non-root user. When podman runs in rootless mode, a user namespace is automatically created for the user, defined in /etc/subuid and /etc/subgid.
+Podman can also be used as a non-root user. When podman runs in rootless mode, a user namespace is automatically created for the user, defined in /etc/subuid and /etc/subgid.
Containers created by a non-root user are not visible to other users and are not seen or managed by Podman running as root.
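
A short sketch of the `CONTAINERS_*` environment variables mentioned above; the file paths are placeholders for wherever the alternate configuration lives:

```
# point podman at alternate configuration files for this shell session
$ export CONTAINERS_CONF=$HOME/test/containers.conf
$ export CONTAINERS_REGISTRIES_CONF=$HOME/test/registries.conf
$ export CONTAINERS_STORAGE_CONF=$HOME/test/storage.conf
$ podman info
```
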
diff --git a/go.mod b/go.mod
index a185dcd89..dd2f196f4 100644
--- a/go.mod
+++ b/go.mod
@@ -6,19 +6,19 @@ require (
github.com/BurntSushi/toml v0.3.1
github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
+ github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
- github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.8.1
- github.com/containernetworking/plugins v0.9.0
- github.com/containers/buildah v1.19.6
+ github.com/containernetworking/plugins v0.9.1
+ github.com/containers/buildah v1.19.8
github.com/containers/common v0.35.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.10.2
github.com/containers/ocicrypt v1.1.0
github.com/containers/psgo v1.5.2
github.com/containers/storage v1.25.0
- github.com/coreos/go-systemd/v22 v22.1.0
- github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756
+ github.com/coreos/go-systemd/v22 v22.2.0
+ github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf
github.com/cyphar/filepath-securejoin v0.2.2
github.com/davecgh/go-spew v1.1.1
github.com/docker/distribution v2.7.1+incompatible
@@ -40,28 +40,25 @@ require (
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
- github.com/onsi/ginkgo v1.15.0
- github.com/onsi/gomega v1.10.5
+ github.com/onsi/ginkgo v1.15.1
+ github.com/onsi/gomega v1.11.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b
github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.8.0
- github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
- github.com/rootless-containers/rootlesskit v0.13.2
+ github.com/rootless-containers/rootlesskit v0.14.0-beta.0
github.com/sirupsen/logrus v1.8.0
github.com/spf13/cobra v1.1.3
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber/jaeger-client-go v2.25.0+incompatible
- github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
go.etcd.io/bbolt v1.3.5
- go.uber.org/atomic v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
@@ -69,5 +66,4 @@ require (
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
k8s.io/api v0.0.0-20190620084959-7cf5895f2711
k8s.io/apimachinery v0.20.4
- k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
)
diff --git a/go.sum b/go.sum
index 3073df41f..693c16c54 100644
--- a/go.sum
+++ b/go.sum
@@ -16,7 +16,6 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -57,6 +56,8 @@ github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrS
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 h1:ZmSAEFFtv3mepC4/Ze6E/hi6vGZlhRvywqp1l+w+qqw=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7/go.mod h1:Kp3ezoDVdhfYxZUtgs4OL8sVvgOLz3txk0sbQD0opvw=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
@@ -71,8 +72,6 @@ github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvso
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
@@ -89,57 +88,54 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0=
-github.com/containernetworking/plugins v0.9.0 h1:c+1gegKhR7+d0Caum9pEHugZlyhXPOG6v3V6xJgIGCI=
-github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg=
-github.com/containers/buildah v1.19.6 h1:8mPysB7QzHxX9okR+Bwq/lsKAZA/FjDcqB+vebgwI1g=
-github.com/containers/buildah v1.19.6/go.mod h1:VnyHWgNmfR1d89/zJ/F4cbwOzaQS+6sBky46W7dCo3E=
+github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/buildah v1.19.8 h1:4TzmetfKPQF5hh6GgMwbAfrD50j+PAcsRiWDnx+gCI8=
+github.com/containers/buildah v1.19.8/go.mod h1:VnyHWgNmfR1d89/zJ/F4cbwOzaQS+6sBky46W7dCo3E=
github.com/containers/common v0.33.4/go.mod h1:PhgL71XuC4jJ/1BIqeP7doke3aMFkCP90YBXwDeUr9g=
github.com/containers/common v0.35.0 h1:1OLZ2v+Tj/CN9BTQkKZ5VOriOiArJedinMMqfJRUI38=
github.com/containers/common v0.35.0/go.mod h1:gs1th7XFTOvVUl4LDPdQjOfOeNiVRDbQ7CNrZ0wS6F8=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns=
github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
github.com/containers/image/v5 v5.10.2 h1:STD9GYR9p/X0qTLmBYsyx8dEM7zQW+qZ8KHoL/64fkg=
github.com/containers/image/v5 v5.10.2/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2DyGPY=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw=
github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
-github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
+github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.25.0 h1:p0PLlQcWmtE+7XLfOCR0WuYyMTby1yozpI4DaKOtWTA=
github.com/containers/storage v1.25.0/go.mod h1:UxTYd5F4mPVqmDRcRL0PBS8+HP74aBn96eahnhEvPtk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0 h1:mw6SAibtHKZcNzAsOxjoHIG0gy5YFHhypWSSNc6EjbQ=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.2.0 h1:BBmbNtSc5PuUM3Byxs7yE5rLdxQO4/FMoEXY5Rle4GA=
+github.com/coreos/go-systemd/v22 v22.2.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756 h1:4T3rzrCSvMgVTR+fm526d+Ed0BurAHGjOaaNFOVoK6E=
-github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756/go.mod h1:vingr1ztOAzP2WyTgGbpMov9dFhbjNxdLtDv0+PhAvY=
+github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf h1:k2wrxBiBseRfOD7h+9fABEuesABBQuUuW5fWwpARbeI=
+github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf/go.mod h1:vingr1ztOAzP2WyTgGbpMov9dFhbjNxdLtDv0+PhAvY=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -149,14 +145,12 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible h1:+0LETFJcCLdIqdtEbVWF1JIxATqM15Y4sLiMcWOYq2U=
github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.0-beta1.0.20201113105859-b6bfff2a628f+incompatible h1:lwpV3629md5omgAKjxPWX17shI7vMRpE3nyb9WHn8pA=
github.com/docker/docker v20.10.0-beta1.0.20201113105859-b6bfff2a628f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -174,12 +168,10 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehP
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -224,7 +216,6 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -246,7 +237,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -275,7 +265,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@@ -283,7 +272,6 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -314,7 +302,6 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@@ -342,7 +329,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -353,7 +340,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -374,10 +360,10 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
@@ -414,7 +400,7 @@ github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf h1:Un6PNx5oMK6CCwO3QTUyPiK2mtZnPrpDl5UnZ64eCkw=
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/moby/vpnkit v0.4.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ=
+github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -434,7 +420,10 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -442,10 +431,11 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
+github.com/onsi/ginkgo v1.15.1 h1:DsXNrKujDlkMS9Rsxmd+Fg7S6Kc5lhE+qX8tY6laOxc=
+github.com/onsi/ginkgo v1.15.1/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -454,10 +444,11 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
+github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug=
+github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -480,17 +471,15 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df/go.m
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA=
github.com/openshift/imagebuilder v1.1.8/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -526,8 +515,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rootless-containers/rootlesskit v0.13.2 h1:NoSyGw0+0Js0L6nI/rfm8laV0QBI+sUxjFSGWfQgtr0=
-github.com/rootless-containers/rootlesskit v0.13.2/go.mod h1:P+T/zWEzrIidEJIsYkuVWFLPebBvdehdIem7s36glh8=
+github.com/rootless-containers/rootlesskit v0.14.0-beta.0 h1:S0VzvU7sEvqCTkxPAxzJ1OZpG9a8oG9FSwkVhk0b8PM=
+github.com/rootless-containers/rootlesskit v0.14.0-beta.0/go.mod h1:5UDnrX52Dyoyz2lK66mjHftWpK9YSp1ghO+fY1ZkxFc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -545,7 +534,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.0 h1:nfhvjKcUMhBMVqbKHJlk5RPrrfYr/NMo3692g0dwfWU=
github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A=
@@ -556,7 +544,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
@@ -590,8 +577,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1
github.com/u-root/u-root v7.0.0+incompatible/go.mod h1:RYkpo8pTHrNjW08opNd/U6p/RJE7K0D8fXO0d47+3YY=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
-github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -610,6 +595,7 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -630,14 +616,11 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -699,13 +682,11 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -767,7 +748,6 @@ golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -783,7 +763,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
@@ -843,9 +822,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
@@ -863,7 +840,6 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -887,7 +863,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@@ -909,8 +884,6 @@ k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
-k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
@@ -920,8 +893,6 @@ k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
-k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
diff --git a/hack/podman-socat b/hack/podman-socat
index 7bc571816..6ee6b89d8 100755
--- a/hack/podman-socat
+++ b/hack/podman-socat
@@ -54,8 +54,8 @@ trap "cleanup $TMPDIR" EXIT
# Need locations to store stuff
mkdir -p "${TMPDIR}"/{podman,crio,crio-run,cni/net.d,ctnr,tunnel}
-export REGISTRIES_CONFIG_PATH=${TMPDIR}/registry.conf
-cat >"$REGISTRIES_CONFIG_PATH" <<-EOT
+export CONTAINERS_REGISTRIES_CONF=${TMPDIR}/registry.conf
+cat >"$CONTAINERS_REGISTRIES_CONF" <<-EOT
[registries.search]
registries = ['docker.io']
[registries.insecure]
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 6f2eaeab2..5df3e8961 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -879,7 +879,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
ctrDB := ctrBucket.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
}
dependsBkt := ctrDB.Bucket(dependenciesBkt)
@@ -1669,7 +1669,105 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// SafeRewriteContainerConfig rewrites a container's configuration in a more
+// limited fashion than RewriteContainerConfig. It is marked as safe to use
+// under most circumstances, unlike RewriteContainerConfig.
+// DO NOT USE TO: Change container dependencies, change pod membership, change
+// locks, change container ID.
+func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if newName != "" && newCfg.Name != newName {
+ return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID())
+ }
+ if newName != "" && oldName == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given")
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ if newName != "" {
+ idBkt, err := getIDBucket(tx)
+ if err != nil {
+ return err
+ }
+ namesBkt, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+ allCtrsBkt, err := getAllCtrsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ needsRename := true
+ if exists := namesBkt.Get([]byte(newName)); exists != nil {
+ if string(exists) == ctr.ID() {
+ // Name already associated with the ID
+ // of this container. No need for a
+ // rename.
+ needsRename = false
+ } else {
+ return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID())
+ }
+ }
+
+ if needsRename {
+ // We do have to remove the old name. The other
+ // buckets are ID-indexed so we just need to
+ // overwrite the values there.
+ if err := namesBkt.Delete([]byte(oldName)); err != nil {
+ return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID())
+ }
+ if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID())
+ }
+ if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil {
+ return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID())
+ }
+ if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
+ }
+ }
+ }
+
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index cf8f1c175..d4994334f 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -1055,9 +1055,9 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
return nil, err
} else if !exists {
if isPod {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "%q is a pod, not a container", idOrName)
}
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %q found", idOrName)
}
return id, nil
}
diff --git a/libpod/container.go b/libpod/container.go
index ee6e243ac..65abbfd5e 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -904,6 +904,12 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
}
}
+ return c.namespacePath(linuxNS)
+}
+
+// namespacePath returns the path of one of the container's namespaces
+// If the container is not running, an error will be returned
+func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 2818ac841..4ccb240e7 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "io"
"io/ioutil"
"net/http"
"os"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/signal"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -27,10 +27,6 @@ import (
// containers). The `recursive` parameter will, if set to true, start these
// dependency containers before initializing this container.
func (c *Container) Init(ctx context.Context, recursive bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "containerInit")
- span.SetTag("struct", "container")
- defer span.Finish()
-
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -83,10 +79,6 @@ func (c *Container) Init(ctx context.Context, recursive bool) error {
// running before being run. The recursive parameter, if set, will start all
// dependencies before starting this container.
func (c *Container) Start(ctx context.Context, recursive bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "containerStart")
- span.SetTag("struct", "container")
- defer span.Finish()
-
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -349,10 +341,6 @@ func (c *Container) Mount() (string, error) {
}
}
- if c.state.State == define.ContainerStateRemoving {
- return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
- }
-
defer c.newContainerEvent(events.Mount)
return c.mount()
}
@@ -367,7 +355,6 @@ func (c *Container) Unmount(force bool) error {
return err
}
}
-
if c.state.Mounted {
mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
if err != nil {
@@ -847,31 +834,59 @@ func (c *Container) ShouldRestart(ctx context.Context) bool {
return c.shouldRestart()
}
-// ResolvePath resolves the specified path on the root for the container. The
-// root must either be the mounted image of the container or the already
-// mounted container storage.
-//
-// It returns the resolved root and the resolved path. Note that the path may
-// resolve to the container's mount point or to a volume or bind mount.
-func (c *Container) ResolvePath(ctx context.Context, root string, path string) (string, string, error) {
- logrus.Debugf("Resolving path %q (root %q) on container %s", path, root, c.ID())
+// CopyFromArchive copies the contents from the specified tarStream to path
+// *inside* the container.
+func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, tarStream io.Reader) (func() error, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
- // Minimal sanity checks.
- if len(root)*len(path) == 0 {
- return "", "", errors.Wrapf(define.ErrInternal, "ResolvePath: root (%q) and path (%q) must be non empty", root, path)
+ if err := c.syncContainer(); err != nil {
+ return nil, err
+ }
}
- if _, err := os.Stat(root); err != nil {
- return "", "", errors.Wrapf(err, "cannot locate root to resolve path on container %s", c.ID())
+
+ return c.copyFromArchive(ctx, containerPath, tarStream)
+}
+
+// CopyToArchive copies the contents from the specified path *inside* the
+// container to the tarStream.
+func (c *Container) CopyToArchive(ctx context.Context, containerPath string, tarStream io.Writer) (func() error, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return nil, err
+ }
}
+ return c.copyToArchive(ctx, containerPath, tarStream)
+}
+
+// Stat the specified path *inside* the container and return a file info.
+func (c *Container) Stat(ctx context.Context, containerPath string) (*define.FileInfo, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return "", "", err
+ return nil, err
+ }
+ }
+
+ var mountPoint string
+ var err error
+ if c.state.Mounted {
+ mountPoint = c.state.Mountpoint
+ } else {
+ mountPoint, err = c.mount()
+ if err != nil {
+ return nil, err
}
+ defer c.unmount(false)
}
- return c.resolvePath(root, path)
+ info, _, _, err := c.stat(ctx, mountPoint, containerPath)
+ return info, err
}
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
new file mode 100644
index 000000000..5c275c641
--- /dev/null
+++ b/libpod/container_copy_linux.go
@@ -0,0 +1,264 @@
+// +build linux
+
+package libpod
+
+import (
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ buildahCopiah "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/pkg/chrootuser"
+ "github.com/containers/buildah/util"
+ "github.com/containers/podman/v3/libpod/define"
+ "github.com/containers/podman/v3/pkg/rootless"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func (c *Container) copyFromArchive(ctx context.Context, path string, reader io.Reader) (func() error, error) {
+ var (
+ mountPoint string
+ resolvedRoot string
+ resolvedPath string
+ unmount func()
+ err error
+ )
+
+ // Make sure that "/" copies the *contents* of the mount point and not
+ // the directory.
+ if path == "/" {
+ path = "/."
+ }
+
+ // Optimization: only mount if the container is not already mounted.
+ if c.state.Mounted {
+ mountPoint = c.state.Mountpoint
+ unmount = func() {}
+ } else {
+ // NOTE: make sure to unmount in error paths.
+ mountPoint, err = c.mount()
+ if err != nil {
+ return nil, err
+ }
+ unmount = func() { c.unmount(false) }
+ }
+
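+ // For running containers the copy below is executed inside the
+ // container's mount namespace (via joinMountAndExec), so paths resolve
+ // against "/" instead of the host-side mount point.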
+ if c.state.State == define.ContainerStateRunning {
+ resolvedRoot = "/"
+ resolvedPath = c.pathAbs(path)
+ } else {
+ resolvedRoot, resolvedPath, err = c.resolvePath(mountPoint, path)
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+ }
+
+ // Make sure we chown the files to the container's main user and group ID.
+ user, err := getContainerUser(c, mountPoint)
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+ idPair := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+
+ decompressed, err := archive.DecompressStream(reader)
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+
+ logrus.Debugf("Container copy *to* %q (resolved: %q) on container %q (ID: %s)", path, resolvedPath, c.Name(), c.ID())
+
+ return func() error {
+ defer unmount()
+ defer decompressed.Close()
+ putOptions := buildahCopiah.PutOptions{
+ UIDMap: c.config.IDMappings.UIDMap,
+ GIDMap: c.config.IDMappings.GIDMap,
+ ChownDirs: &idPair,
+ ChownFiles: &idPair,
+ }
+
+ return c.joinMountAndExec(ctx,
+ func() error {
+ return buildahCopiah.Put(resolvedRoot, resolvedPath, putOptions, decompressed)
+ },
+ )
+ }, nil
+}
+
+func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Writer) (func() error, error) {
+ var (
+ mountPoint string
+ unmount func()
+ err error
+ )
+
+ // Optimization: only mount if the container is not already mounted.
+ if c.state.Mounted {
+ mountPoint = c.state.Mountpoint
+ unmount = func() {}
+ } else {
+ // NOTE: make sure to unmount in error paths.
+ mountPoint, err = c.mount()
+ if err != nil {
+ return nil, err
+ }
+ unmount = func() { c.unmount(false) }
+ }
+
+ statInfo, resolvedRoot, resolvedPath, err := c.stat(ctx, mountPoint, path)
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+
+ // We optimistically chown to the host user. In case of a hypothetical
+ // container-to-container copy, the reading side will chown back to the
+ // container user.
+ user, err := getContainerUser(c, mountPoint)
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(
+ idtoolsToRuntimeSpec(c.config.IDMappings.UIDMap),
+ idtoolsToRuntimeSpec(c.config.IDMappings.GIDMap),
+ user.UID,
+ user.GID,
+ )
+ if err != nil {
+ unmount()
+ return nil, err
+ }
+ idPair := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
+
+ logrus.Debugf("Container copy *from* %q (resolved: %q) on container %q (ID: %s)", path, resolvedPath, c.Name(), c.ID())
+
+ return func() error {
+ defer unmount()
+ getOptions := buildahCopiah.GetOptions{
+ // Unless the specified path points to ".", we want to copy the base directory.
+ KeepDirectoryNames: statInfo.IsDir && filepath.Base(path) != ".",
+ UIDMap: c.config.IDMappings.UIDMap,
+ GIDMap: c.config.IDMappings.GIDMap,
+ ChownDirs: &idPair,
+ ChownFiles: &idPair,
+ Excludes: []string{"dev", "proc", "sys"},
+ // Ignore EPERMs when copying from rootless containers
+ // since we cannot read TTY devices. Those are owned
+ // by the host's root and hence "nobody" inside the
+ // container's user namespace.
+ IgnoreUnreadable: rootless.IsRootless() && c.state.State == define.ContainerStateRunning,
+ }
+ return c.joinMountAndExec(ctx,
+ func() error {
+ return buildahCopiah.Get(resolvedRoot, "", getOptions, []string{resolvedPath}, writer)
+ },
+ )
+ }, nil
+}
+
+// getContainerUser returns the specs.User of the container, including any additional groups.
+func getContainerUser(container *Container, mountPoint string) (specs.User, error) {
+ userspec := container.Config().User
+
+ uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+
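+ // Only look up additional groups when the user spec does not already
+ // pin an explicit group (i.e., it contains no ":").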
+ if !strings.Contains(userspec, ":") {
+ groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
+ if err2 != nil {
+ if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ err = err2
+ }
+ } else {
+ u.AdditionalGids = groups
+ }
+ }
+
+ return u, err
+}
+
+// idtoolsToRuntimeSpec converts idtools ID mapping to the one of the runtime spec.
+func idtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxIDMapping) {
+ for _, idmap := range idMaps {
+ tempIDMap := specs.LinuxIDMapping{
+ ContainerID: uint32(idmap.ContainerID),
+ HostID: uint32(idmap.HostID),
+ Size: uint32(idmap.Size),
+ }
+ convertedIDMap = append(convertedIDMap, tempIDMap)
+ }
+ return convertedIDMap
+}
+
+// joinMountAndExec executes the specified function `f` inside the container's
+// mount and PID namespaces, which gives it the exact view of the
+// container's file system.
+//
+// Note that if the container is not running, `f()` will be executed as is.
+func (c *Container) joinMountAndExec(ctx context.Context, f func() error) error {
+ if c.state.State != define.ContainerStateRunning {
+ return f()
+ }
+
+ // Container's running, so we need to execute `f()` inside its mount NS.
+ errChan := make(chan error)
+ go func() {
+ runtime.LockOSThread()
+
+ // Join the mount and PID NS of the container.
+ getFD := func(ns LinuxNS) (*os.File, error) {
+ nsPath, err := c.namespacePath(ns)
+ if err != nil {
+ return nil, err
+ }
+ return os.Open(nsPath)
+ }
+
+ mountFD, err := getFD(MountNS)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ defer mountFD.Close()
+
+ pidFD, err := getFD(PIDNS)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ defer pidFD.Close()
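+ // Move this locked thread into a private mount namespace before
+ // joining the container's PID and mount namespaces below.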
+ if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
+ errChan <- err
+ return
+ }
+ if err := unix.Setns(int(pidFD.Fd()), unix.CLONE_NEWPID); err != nil {
+ errChan <- err
+ return
+ }
+
+ if err := unix.Setns(int(mountFD.Fd()), unix.CLONE_NEWNS); err != nil {
+ errChan <- err
+ return
+ }
+
+ // Last but not least, execute the workload.
+ errChan <- f()
+ }()
+ return <-errChan
+}
diff --git a/libpod/container_copy_unsupported.go b/libpod/container_copy_unsupported.go
new file mode 100644
index 000000000..b2bdd3e3d
--- /dev/null
+++ b/libpod/container_copy_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package libpod
+
+import (
+ "context"
+ "io"
+)
+
+func (c *Container) copyFromArchive(ctx context.Context, path string, reader io.Reader) (func() error, error) {
+ return nil, nil
+}
+
+func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Writer) (func() error, error) {
+ return nil, nil
+}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 2e0c24579..1614211fb 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/buildah/copier"
"github.com/containers/common/pkg/secrets"
"github.com/containers/podman/v3/libpod/define"
@@ -32,7 +33,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -135,7 +135,7 @@ func (c *Container) ControlSocketPath() string {
// CheckpointPath returns the path to the directory containing the checkpoint
func (c *Container) CheckpointPath() string {
- return filepath.Join(c.bundlePath(), "checkpoint")
+ return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory)
}
// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
@@ -398,10 +398,6 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "setupStorage")
- span.SetTag("type", "container")
- defer span.Finish()
-
if !c.valid {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
@@ -1034,10 +1030,6 @@ func (c *Container) cniHosts() string {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context, retainRetries bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "init")
- span.SetTag("struct", "container")
- defer span.Finish()
-
// Unconditionally remove conmon temporary files.
// We've been running into far too many issues where they block startup.
if err := c.removeConmonFiles(); err != nil {
@@ -1110,10 +1102,6 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
// Deletes the container in the runtime, and resets its state to Exited.
// The container can be restarted cleanly after this.
func (c *Container) cleanupRuntime(ctx context.Context) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "cleanupRuntime")
- span.SetTag("struct", "container")
- defer span.Finish()
-
// If the container is not ContainerStateStopped or
// ContainerStateCreated, do nothing.
if !c.ensureState(define.ContainerStateStopped, define.ContainerStateCreated) {
@@ -1155,10 +1143,6 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// Not necessary for ContainerStateExited - the container has already been
// removed from the runtime, so init() can proceed freely.
func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "reinit")
- span.SetTag("struct", "container")
- defer span.Finish()
-
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.cleanupRuntime(ctx); err != nil {
@@ -1306,9 +1290,7 @@ func (c *Container) stop(timeout uint) error {
c.lock.Unlock()
}
- if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil {
- return err
- }
+ stopErr := c.ociRuntime.StopContainer(c, timeout, all)
if !c.batched {
c.lock.Lock()
@@ -1317,13 +1299,23 @@ func (c *Container) stop(timeout uint) error {
// If the container has already been removed (e.g., via
// the cleanup process), there's nothing left to do.
case define.ErrNoSuchCtr, define.ErrCtrRemoved:
- return nil
+ return stopErr
default:
+ if stopErr != nil {
+ logrus.Errorf("Error syncing container %s status: %v", c.ID(), err)
+ return stopErr
+ }
return err
}
}
}
+ // We have to check stopErr *after* we lock again - otherwise, we have a
+ // chance of panicking on a double-unlock. Ref: GH Issue 9615
+ if stopErr != nil {
+ return stopErr
+ }
+
// Since we're now subject to a race condition with other processes who
// may have altered the state (and other data), let's check if the
// state has changed. If so, we should return immediately and log a
@@ -1811,10 +1803,6 @@ func (c *Container) cleanupStorage() error {
func (c *Container) cleanup(ctx context.Context) error {
var lastError error
- span, _ := opentracing.StartSpanFromContext(ctx, "cleanup")
- span.SetTag("struct", "container")
- defer span.Finish()
-
logrus.Debugf("Cleaning up container %s", c.ID())
// Remove healthcheck unit/timer file if it execs
@@ -1875,10 +1863,6 @@ func (c *Container) cleanup(ctx context.Context) error {
// delete deletes the container and runs any configured poststop
// hooks.
func (c *Container) delete(ctx context.Context) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "delete")
- span.SetTag("struct", "container")
- defer span.Finish()
-
if err := c.ociRuntime.DeleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -1894,10 +1878,6 @@ func (c *Container) delete(ctx context.Context) error {
// the OCI Runtime Specification (which requires them to run
// post-delete, despite the stage name).
func (c *Container) postDeleteHooks(ctx context.Context) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks")
- span.SetTag("struct", "container")
- defer span.Finish()
-
if c.state.ExtensionStageHooks != nil {
extensionHooks, ok := c.state.ExtensionStageHooks["poststop"]
if ok {
@@ -2085,6 +2065,10 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
// mount mounts the container's root filesystem
func (c *Container) mount() (string, error) {
+ if c.state.State == define.ContainerStateRemoving {
+ return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ }
+
mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID())
if err != nil {
return "", errors.Wrapf(err, "error mounting storage for container %s", c.ID())
@@ -2141,26 +2125,11 @@ func (c *Container) canWithPrevious() error {
return err
}
-// writeJSONFile marshalls and writes the given data to a JSON file
-// in the bundle path
-func (c *Container) writeJSONFile(v interface{}, file string) error {
- fileJSON, err := json.MarshalIndent(v, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
- }
- file = filepath.Join(c.bundlePath(), file)
- if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
- return err
- }
-
- return nil
-}
-
// prepareCheckpointExport writes the config and spec to
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
- if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
@@ -2171,7 +2140,7 @@ func (c *Container) prepareCheckpointExport() error {
logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
return err
}
- if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil {
return err
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index dc0418148..24319f4b5 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -19,6 +19,7 @@ import (
"syscall"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/chrootuser"
@@ -33,6 +34,7 @@ import (
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/annotations"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/criu"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/resolvconf"
@@ -47,7 +49,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -320,10 +321,6 @@ func (c *Container) getUserOverrides() *lookup.Overrides {
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "generateSpec")
- span.SetTag("type", "container")
- defer span.Finish()
-
overrides := c.getUserOverrides()
execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides)
if err != nil {
@@ -884,80 +881,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
includeFiles := []string{
- "checkpoint",
"artifacts",
"ctr.log",
- "config.dump",
- "spec.dump",
- "network.status"}
+ metadata.CheckpointDirectory,
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
+ metadata.NetworkStatusFile,
+ }
if options.PreCheckPoint {
includeFiles[0] = "pre-checkpoint"
}
// Get root file-system changes included in the checkpoint archive
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files")
+ var addToTarFiles []string
if !options.IgnoreRootfs {
// To correctly track deleted files, let's go through the output of 'podman diff'
- tarFiles, err := c.runtime.GetDiff("", c.ID())
+ rootFsChanges, err := c.runtime.GetDiff("", c.ID())
if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
- }
- var rootfsIncludeFiles []string
- var deletedFiles []string
-
- for _, file := range tarFiles {
- if file.Kind == archive.ChangeAdd {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
- if file.Kind == archive.ChangeDelete {
- deletedFiles = append(deletedFiles, file.Path)
- continue
- }
- fileName, err := os.Stat(file.Path)
- if err != nil {
- continue
- }
- if !fileName.IsDir() && file.Kind == archive.ChangeModify {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
+ return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
}
- if len(rootfsIncludeFiles) > 0 {
- rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{
- Compression: archive.Uncompressed,
- IncludeSourceDir: true,
- IncludeFiles: rootfsIncludeFiles,
- })
- if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
- }
- rootfsDiffFile, err := os.Create(rootfsDiffPath)
- if err != nil {
- return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
- }
- defer rootfsDiffFile.Close()
- _, err = io.Copy(rootfsDiffFile, rootfsTar)
- if err != nil {
- return err
- }
-
- includeFiles = append(includeFiles, "rootfs-diff.tar")
+ addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
+ if err != nil {
+ return err
}
- if len(deletedFiles) > 0 {
- formatJSON, err := json.MarshalIndent(deletedFiles, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
- }
- if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil {
- return errors.Wrap(err, "error creating delete files list file")
- }
-
- includeFiles = append(includeFiles, "deleted.files")
- }
+ includeFiles = append(includeFiles, addToTarFiles...)
}
// Folder containing archived volumes that will be included in the export
@@ -1034,8 +983,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
- os.Remove(rootfsDiffPath)
- os.Remove(deleteFilesList)
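+ // Clean up the temporary root file-system diff files that were added
+ // to the bundle for this export.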
+ for _, file := range addToTarFiles {
+ os.Remove(filepath.Join(c.bundlePath(), file))
+ }
if !options.IgnoreVolumes {
os.RemoveAll(expVolDir)
@@ -1054,23 +1004,6 @@ func (c *Container) checkpointRestoreSupported() error {
return nil
}
-func (c *Container) checkpointRestoreLabelLog(fileName string) error {
- // Create the CRIU log file and label it
- dumpLog := filepath.Join(c.bundlePath(), fileName)
-
- logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
- if err != nil {
- return errors.Wrap(err, "failed to create CRIU log file")
- }
- if err := logFile.Close(); err != nil {
- logrus.Error(err)
- }
- if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
- return err
- }
- return nil
-}
-
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
if err := c.checkpointRestoreSupported(); err != nil {
return err
@@ -1084,7 +1017,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
- if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil {
return err
}
@@ -1095,11 +1028,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
// Save network.status. This is needed to restore the container with
// the same IP. Currently limited to one IP address in a container
// with one interface.
- formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ")
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil {
+ if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil {
return err
}
@@ -1115,7 +1044,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if options.TargetFile != "" {
- if err = c.exportCheckpoint(options); err != nil {
+ if err := c.exportCheckpoint(options); err != nil {
return err
}
}
@@ -1135,8 +1064,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
cleanup := []string{
"dump.log",
"stats-dump",
- "config.dump",
- "spec.dump",
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
@@ -1151,28 +1080,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
func (c *Container) importCheckpoint(input string) error {
- archiveFile, err := os.Open(input)
- if err != nil {
- return errors.Wrap(err, "failed to open checkpoint archive for import")
- }
-
- defer archiveFile.Close()
- options := &archive.TarOptions{
- ExcludePatterns: []string{
- // config.dump and spec.dump are only required
- // container creation
- "config.dump",
- "spec.dump",
- },
- }
- err = archive.Untar(archiveFile, c.bundlePath(), options)
- if err != nil {
- return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
+ if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil {
+ return err
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
- if err = c.saveSpec(g.Config); err != nil {
+ if err := c.saveSpec(g.Config); err != nil {
return errors.Wrap(err, "saving imported container specification for restore failed")
}
@@ -1221,7 +1135,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
- if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
return err
}
@@ -1244,7 +1158,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
- networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
+ networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath())
// If the restored container should get a new name, the IP address of
// the container will not be restored. This assumes that if a new name is
// specified, the container is restored multiple times.
@@ -1254,43 +1168,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
// container with the same IP address / MAC address as during checkpointing.
- defer networkStatusFile.Close()
- var networkStatus []*cnitypes.Result
- networkJSON, err := ioutil.ReadAll(networkStatusFile)
- if err != nil {
- return err
- }
- if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
- return err
- }
if !options.IgnoreStaticIP {
- // Take the first IP address
- var IP net.IP
- if len(networkStatus) > 0 {
- if len(networkStatus[0].IPs) > 0 {
- IP = networkStatus[0].IPs[0].Address.IP
- }
- }
- if IP != nil {
+ if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil {
// Tell CNI which IP address we want.
c.requestedIP = IP
}
}
if !options.IgnoreStaticMAC {
- // Take the first device with a defined sandbox.
- var MAC net.HardwareAddr
- if len(networkStatus) > 0 {
- for _, n := range networkStatus[0].Interfaces {
- if n.Sandbox != "" {
- MAC, err = net.ParseMAC(n.Mac)
- if err != nil {
- return err
- }
- break
- }
- }
- }
- if MAC != nil {
+ if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil {
// Tell CNI which MAC address we want.
c.requestedMAC = MAC
}
@@ -1398,36 +1283,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Before actually restarting the container, apply the root file-system changes
if !options.IgnoreRootfs {
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- if _, err := os.Stat(rootfsDiffPath); err == nil {
- // Only do this if a rootfs-diff.tar actually exists
- rootfsDiffFile, err := os.Open(rootfsDiffPath)
- if err != nil {
- return errors.Wrap(err, "failed to open root file-system diff file")
- }
- defer rootfsDiffFile.Close()
- if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil {
- return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
- }
+ if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
- deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files")
- if _, err := os.Stat(deletedFilesPath); err == nil {
- var deletedFiles []string
- deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath)
- if err != nil {
- return errors.Wrapf(err, "failed to read deleted files file")
- }
- if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil {
- return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath)
- }
- for _, deleteFile := range deletedFiles {
- // Using RemoveAll as deletedFiles, which is generated from 'podman diff'
- // lists completely deleted directories as a single entry: 'D /root'.
- err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile))
- if err != nil {
- return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID())
- }
- }
+
+ if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
}
@@ -1452,7 +1313,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err)
}
- cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"}
+ cleanup := [...]string{
+ "restore.log",
+ "dump.log",
+ "stats-dump",
+ "stats-restore",
+ metadata.NetworkStatusFile,
+ metadata.RootFsDiffTar,
+ metadata.DeletedFilesFile,
+ }
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
diff --git a/libpod/container_log.go b/libpod/container_log.go
index a3b700004..c207df819 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -29,7 +29,6 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.NoLogging:
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
case define.JournaldLogging:
- // TODO Skip sending logs until journald logs can be read
return c.readFromJournal(ctx, options, logChannel)
case define.JSONLogging:
// TODO provide a separate implementation of this when Conmon
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 5792633b0..4a541b6e7 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -52,6 +52,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
if time.Now().Before(options.Since) {
return nil
}
+ // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past
config.Since = -time.Since(options.Since)
}
config.Matches = append(config.Matches, journal.Match{
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index 5245314ae..d798963b1 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -1,3 +1,4 @@
+// +build linux
package libpod
import (
@@ -10,6 +11,19 @@ import (
"github.com/sirupsen/logrus"
)
+// pathAbs returns an absolute path. If the specified path is
+// relative, it will be resolved relative to the container's working dir.
+func (c *Container) pathAbs(path string) string {
+ if !filepath.IsAbs(path) {
+ // If the containerPath is not absolute, it's relative to the
+ // container's working dir. To be extra careful, let's first
+ // join the working dir with "/", and then add the containerPath
+ // to it.
+ path = filepath.Join(filepath.Join("/", c.WorkingDir()), path)
+ }
+ return path
+}
+
// resolveContainerPaths resolves the container's mount point and the container
// path as specified by the user. Both may resolve to paths outside of the
// container's mount point when the container path hits a volume or bind mount.
@@ -20,14 +34,7 @@ import (
// the host).
func (c *Container) resolvePath(mountPoint string, containerPath string) (string, string, error) {
// Let's first make sure we have a path relative to the mount point.
- pathRelativeToContainerMountPoint := containerPath
- if !filepath.IsAbs(containerPath) {
- // If the containerPath is not absolute, it's relative to the
- // container's working dir. To be extra careful, let's first
- // join the working dir with "/", and the add the containerPath
- // to it.
- pathRelativeToContainerMountPoint = filepath.Join(filepath.Join("/", c.WorkingDir()), containerPath)
- }
+ pathRelativeToContainerMountPoint := c.pathAbs(containerPath)
resolvedPathOnTheContainerMountPoint := filepath.Join(mountPoint, pathRelativeToContainerMountPoint)
pathRelativeToContainerMountPoint = strings.TrimPrefix(pathRelativeToContainerMountPoint, mountPoint)
pathRelativeToContainerMountPoint = filepath.Join("/", pathRelativeToContainerMountPoint)
diff --git a/libpod/container_stat_linux.go b/libpod/container_stat_linux.go
new file mode 100644
index 000000000..0b4d9e2df
--- /dev/null
+++ b/libpod/container_stat_linux.go
@@ -0,0 +1,181 @@
+// +build linux
+
+package libpod
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/copier"
+ "github.com/containers/podman/v3/libpod/define"
+ "github.com/containers/podman/v3/pkg/copy"
+ "github.com/pkg/errors"
+)
+
+// statInsideMount stats the specified path *inside* the container's mount and PID
+// namespace. It returns the file info along with the resolved root ("/") and
+// the resolved path (relative to the root).
+func (c *Container) statInsideMount(ctx context.Context, containerPath string) (*copier.StatForItem, string, string, error) {
+ resolvedRoot := "/"
+ resolvedPath := c.pathAbs(containerPath)
+ var statInfo *copier.StatForItem
+
+ err := c.joinMountAndExec(ctx,
+ func() error {
+ var statErr error
+ statInfo, statErr = secureStat(resolvedRoot, resolvedPath)
+ return statErr
+ },
+ )
+
+ return statInfo, resolvedRoot, resolvedPath, err
+}
+
+// statOnHost stats the specified path *on the host*. It returns the file info
+// along with the resolved root and the resolved path. Both paths are absolute
+// to the host's root. Note that the paths may resolve outside the
+// container's mount point (e.g., to a volume or bind mount).
+func (c *Container) statOnHost(ctx context.Context, mountPoint string, containerPath string) (*copier.StatForItem, string, string, error) {
+ // Now resolve the container's path. It may hit a volume, it may hit a
+ // bind mount, it may be relative.
+ resolvedRoot, resolvedPath, err := c.resolvePath(mountPoint, containerPath)
+ if err != nil {
+ return nil, "", "", err
+ }
+
+ statInfo, err := secureStat(resolvedRoot, resolvedPath)
+ return statInfo, resolvedRoot, resolvedPath, err
+}
+
+func (c *Container) stat(ctx context.Context, containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) {
+ var (
+ resolvedRoot string
+ resolvedPath string
+ absContainerPath string
+ statInfo *copier.StatForItem
+ statErr error
+ )
+
+ // Make sure that "/" copies the *contents* of the mount point and not
+ // the directory.
+ if containerPath == "/" {
+ containerPath = "/."
+ }
+
+ // Wildcards are not allowed.
+ // TODO: it is now technically possible to support wildcards.
+ // We may consider enabling support in the future.
+ if strings.Contains(containerPath, "*") {
+ return nil, "", "", copy.ErrENOENT
+ }
+
+ if c.state.State == define.ContainerStateRunning {
+ // If the container is running, we need to join its mount namespace
+ // and stat there.
+ statInfo, resolvedRoot, resolvedPath, statErr = c.statInsideMount(ctx, containerPath)
+ } else {
+ // If the container is NOT running, we need to resolve the path
+ // on the host.
+ statInfo, resolvedRoot, resolvedPath, statErr = c.statOnHost(ctx, containerMountPoint, containerPath)
+ }
+
+ if statErr != nil {
+ if statInfo == nil {
+ return nil, "", "", statErr
+ }
+ // Not all errors from secureStat map to ErrNotExist, so we
+ // have to look into the error string. Turning it into an
+ // ENOENT lets the API handlers return the correct status code
+ // which is crucial for the remote client.
+ if os.IsNotExist(statErr) || strings.Contains(statErr.Error(), "o such file or directory") {
+ statErr = copy.ErrENOENT
+ }
+ }
+
+ if statInfo.IsSymlink {
+ // Symlinks are already evaluated and always relative to the
+ // container's mount point.
+ absContainerPath = statInfo.ImmediateTarget
+ } else if strings.HasPrefix(resolvedPath, containerMountPoint) {
+ // If the path is on the container's mount point, strip it off.
+ absContainerPath = strings.TrimPrefix(resolvedPath, containerMountPoint)
+ absContainerPath = filepath.Join("/", absContainerPath)
+ } else {
+ // No symlink and not on the container's mount point, so let's
+ // move it back to the original input. It must have evaluated
+ // to a volume or bind mount but we cannot return host paths.
+ absContainerPath = containerPath
+ }
+
+ // Preserve the base path as specified by the user. The `filepath`
+ // package likes to remove trailing slashes and dots that are crucial
+ // to the copy logic.
+ absContainerPath = copy.PreserveBasePath(containerPath, absContainerPath)
+ resolvedPath = copy.PreserveBasePath(containerPath, resolvedPath)
+
+ info := &define.FileInfo{
+ IsDir: statInfo.IsDir,
+ Name: filepath.Base(absContainerPath),
+ Size: statInfo.Size,
+ Mode: statInfo.Mode,
+ ModTime: statInfo.ModTime,
+ LinkTarget: absContainerPath,
+ }
+
+ return info, resolvedRoot, resolvedPath, statErr
+}
+
+// secureStat extracts file info for path in a chroot'ed environment in root.
+func secureStat(root string, path string) (*copier.StatForItem, error) {
+ var glob string
+ var err error
+
+ // If root and path are equal, the relative path is empty, so the glob
+ // must be ".".
+ if filepath.Clean(root) == filepath.Clean(path) {
+ glob = "."
+ } else {
+ glob, err = filepath.Rel(root, path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ globStats, err := copier.Stat(root, "", copier.StatOptions{}, []string{glob})
+ if err != nil {
+ return nil, err
+ }
+
+ if len(globStats) != 1 {
+ return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
+ }
+ if len(globStats[0].Results) != 1 {
+ return nil, errors.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
+ }
+
+ // NOTE: the key in the map may differ from `glob` when hitting a symlink.
+ // Hence, we just take the first (and only) key/value pair.
+ for _, stat := range globStats[0].Results {
+ var statErr error
+ if stat.Error != "" {
+ statErr = errors.New(stat.Error)
+ }
+ // If necessary evaluate the symlink
+ if stat.IsSymlink {
+ target, err := copier.Eval(root, path, copier.EvalOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "error evaluating symlink in container")
+ }
+ // Need to make sure the symlink is relative to the root!
+ target = strings.TrimPrefix(target, root)
+ target = filepath.Join("/", target)
+ stat.ImmediateTarget = target
+ }
+ return stat, statErr
+ }
+
+ // Nothing found!
+ return nil, copy.ErrENOENT
+}
diff --git a/libpod/container_stat_unsupported.go b/libpod/container_stat_unsupported.go
new file mode 100644
index 000000000..c002e4d32
--- /dev/null
+++ b/libpod/container_stat_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package libpod
+
+import (
+ "context"
+
+ "github.com/containers/podman/v3/libpod/define"
+)
+
+func (c *Container) stat(ctx context.Context, containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) {
+ return nil, "", "", nil
+}
diff --git a/libpod/define/fileinfo.go b/libpod/define/fileinfo.go
new file mode 100644
index 000000000..2c7b6fe99
--- /dev/null
+++ b/libpod/define/fileinfo.go
@@ -0,0 +1,16 @@
+package define
+
+import (
+ "os"
+ "time"
+)
+
+// FileInfo describes the attributes of a file or directory.
+type FileInfo struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ ModTime time.Time `json:"mtime"`
+ IsDir bool `json:"isDir"`
+ LinkTarget string `json:"linkTarget"`
+}
diff --git a/libpod/define/mount.go b/libpod/define/mount.go
new file mode 100644
index 000000000..1b0d019c8
--- /dev/null
+++ b/libpod/define/mount.go
@@ -0,0 +1,12 @@
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+ // TypeVolume is the type for named volumes
+ TypeVolume = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs = "tmpfs"
+ // TypeDevpts is the type for creating a devpts
+ TypeDevpts = "devpts"
+)
diff --git a/libpod/define/version.go b/libpod/define/version.go
index 67dc730ac..5249b5d84 100644
--- a/libpod/define/version.go
+++ b/libpod/define/version.go
@@ -5,7 +5,7 @@ import (
"strconv"
"time"
- podmanVersion "github.com/containers/podman/v3/version"
+ "github.com/containers/podman/v3/version"
)
// Overwritten at build time
@@ -42,8 +42,8 @@ func GetVersion() (Version, error) {
}
}
return Version{
- APIVersion: podmanVersion.APIVersion.String(),
- Version: podmanVersion.Version.String(),
+ APIVersion: version.APIVersion[version.Libpod][version.CurrentAPI].String(),
+ Version: version.Version.String(),
GoVersion: runtime.Version(),
GitCommit: gitCommit,
BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC),
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 7c760a79a..12dc22360 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -38,7 +38,6 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -143,11 +142,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
-func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType) (*Image, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "newImage")
- span.SetTag("type", "runtime")
- defer span.Finish()
-
+func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType, progress chan types.ProgressProperties) (*Image, error) {
// We don't know if the image is local or not ... check local first
if pullType != util.PullImageAlways {
newImage, err := ir.NewFromLocal(name)
@@ -162,7 +157,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label)
+ imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label, progress)
if err != nil {
return nil, err
}
@@ -323,7 +318,7 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName
}
defer goal.cleanUp()
- imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil)
+ imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil, nil)
if err != nil {
return nil, err
}
@@ -816,7 +811,7 @@ func (i *Image) UntagImage(tag string) error {
// PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed.
// Use PushImageToReference if the destination is known precisely.
-func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged, progress chan types.ProgressProperties) error {
if destination == "" {
return errors.Wrapf(syscall.EINVAL, "destination image name must be specified")
}
@@ -834,11 +829,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination
return err
}
}
- return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags)
+ return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags, progress)
}
// PushImageToReference pushes the given image to a location described by the given path
-func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged, progress chan types.ProgressProperties) error {
sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress)
sc.BlobInfoCacheDir = filepath.Join(i.imageruntime.store.GraphRoot(), "cache")
@@ -859,6 +854,10 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
}
copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags)
copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
+ if progress != nil {
+ copyOptions.Progress = progress
+ copyOptions.ProgressInterval = time.Second
+ }
// Copy the image to the remote destination
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions)
if err != nil {
@@ -1293,21 +1292,11 @@ func (i *Image) inspect(ctx context.Context, calculateSize bool) (*inspect.Image
// Inspect returns an image's inspect data
func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "imageInspect")
-
- span.SetTag("type", "image")
- defer span.Finish()
-
return i.inspect(ctx, true)
}
// InspectNoSize returns an image's inspect data without calculating the size for the image
func (i *Image) InspectNoSize(ctx context.Context) (*inspect.ImageData, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "imageInspectNoSize")
-
- span.SetTag("type", "image")
- defer span.Finish()
-
return i.inspect(ctx, false)
}
@@ -1648,7 +1637,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag
return err
}
}
- if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{RemoveSignatures: removeSignatures}, &DockerRegistryOptions{}, additionaltags); err != nil {
+ if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{RemoveSignatures: removeSignatures}, &DockerRegistryOptions{}, additionaltags, nil); err != nil {
return errors.Wrapf(err, "unable to save %q", source)
}
i.newImageEvent(events.Save)
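For orientation, here is a minimal sketch of how a caller might drive the new progress parameter threaded through New and PushImageToReference. The channel type and event constants come from containers/image's types package; the image name, print statements, and channel-closing convention shown here are illustrative only (error handling elided):

    progress := make(chan types.ProgressProperties)
    done := make(chan struct{})
    go func() {
        defer close(done)
        for e := range progress {
            switch e.Event {
            case types.ProgressEventNewArtifact:
                fmt.Printf("%s: preparing\n", e.Artifact.Digest.Encoded()[0:12])
            case types.ProgressEventRead:
                fmt.Printf("%s: %d/%d bytes\n", e.Artifact.Digest.Encoded()[0:12], e.Offset, e.Artifact.Size)
            case types.ProgressEventDone:
                fmt.Printf("%s: done\n", e.Artifact.Digest.Encoded()[0:12])
            }
        }
    }()
    newImage, err := ir.New(ctx, "docker.io/library/alpine:latest", "", "", nil, nil,
        image.SigningOptions{}, nil, util.PullImageMissing, progress)
    close(progress) // the copy only sends on the channel; the caller owns and closes it
    <-done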
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index 1ea4f6c11..3e6e7b9db 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -94,9 +94,9 @@ func TestImage_NewFromLocal(t *testing.T) {
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
ir.Eventer = events.NewNullEventer()
- bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
- bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
tm := makeLocalMatrix(bb, bbglibc)
@@ -140,7 +140,7 @@ func TestImage_New(t *testing.T) {
// Iterate over the names and delete the image
// after the pull
for _, img := range names {
- newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
assert.NotEqual(t, newImage.ID(), "")
err = newImage.Remove(context.Background(), false)
@@ -169,7 +169,7 @@ func TestImage_MatchRepoTag(t *testing.T) {
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
ir.Eventer = events.NewNullEventer()
- newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
err = newImage.TagImage("foo:latest")
assert.NoError(t, err)
diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go
index dde39dba1..aa3084449 100644
--- a/libpod/image/layer_tree.go
+++ b/libpod/image/layer_tree.go
@@ -4,7 +4,6 @@ import (
"context"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -188,7 +187,12 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
node, exists := t.nodes[child.TopLayer()]
if !exists {
- return nil, errors.Errorf("layer not found in layer tree: %q", child.TopLayer())
+ // Note: erroring out in this case turned out to be a mistake. Users
+ // may not be able to recover, so we now log a warning to guide them
+ // toward resolving the issue and keep the error non-fatal.
+ logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted; consider running `podman system reset`.", child.TopLayer())
+ return nil, nil
}
childOCI, err := t.toOCI(ctx, child)
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 3cb1e57c7..58160b52f 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -6,6 +6,7 @@ import (
"io"
"path/filepath"
"strings"
+ "time"
"github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
@@ -22,7 +23,6 @@ import (
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/registries"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -171,9 +171,6 @@ func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context
// pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport.
// Note that callers are responsible for invoking (*pullGoal).cleanUp() to clean up possibly open resources.
func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference")
- defer span.Finish()
-
// supports pulling from docker-archive, oci, and registries
switch srcRef.Transport().Name() {
case DockerArchive:
@@ -241,10 +238,7 @@ func toLocalImageName(imageName string) string {
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
- defer span.Finish()
-
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
var goal *pullGoal
sc := GetSystemContext(signaturePolicyPath, authfile, false)
if dockerOptions != nil {
@@ -275,14 +269,11 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
}
}
defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label, progress)
}
// pullImageFromReference pulls an image from a types.imageReference.
func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromReference")
- defer span.Finish()
-
sc := GetSystemContext(signaturePolicyPath, authfile, false)
if dockerOptions != nil {
sc.OSChoice = dockerOptions.OSChoice
@@ -294,7 +285,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil, nil)
}
func cleanErrorMessage(err error) string {
@@ -304,10 +295,7 @@ func cleanErrorMessage(err error) string {
}
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
- defer span.Finish()
-
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
policyContext, err := getPolicyContext(sc)
if err != nil {
return nil, err
@@ -328,6 +316,10 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
for _, imageInfo := range goal.refPairs {
copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil)
copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
+ if progress != nil {
+ copyOptions.Progress = progress
+ copyOptions.ProgressInterval = time.Second
+ }
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...\n", imageInfo.image)); err != nil {
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 26f15d9c8..3875878ed 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -822,6 +822,46 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container
return nil
}
+// SafeRewriteContainerConfig rewrites a container's configuration.
+// It's safer than RewriteContainerConfig, but still has limitations. Please
+// read the comment in state.go before using.
+func (s *InMemoryState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if _, err := s.nameIndex.Get(newName); err == nil {
+ return errors.Wrapf(define.ErrCtrExists, "name %s is in use", newName)
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ // Change name in registry.
+ if s.namespace != "" {
+ nsIndex, ok := s.namespaceIndexes[s.namespace]
+ if !ok {
+ return define.ErrInternal
+ }
+ nsIndex.nameIndex.Release(oldName)
+ if err := nsIndex.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+ }
+ s.nameIndex.Release(oldName)
+ if err := s.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
// RewritePodConfig rewrites a pod's configuration.
// This function is DANGEROUS, even with in-memory state.
// Please read the full comment on it in state.go before using it.
diff --git a/libpod/kube.go b/libpod/kube.go
index 0c4f9f0a0..6feb69fea 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -676,8 +676,18 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
}
+ mountpoint := c.state.Mountpoint
+ if mountpoint == "" {
+ var err error
+ mountpoint, err = c.mount()
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID())
+ }
+ defer c.unmount(false)
+ }
logrus.Debugf("Looking in container for user: %s", c.User())
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil)
+
+ execUser, err := lookup.GetUserGroupInfo(mountpoint, c.User(), nil)
if err != nil {
return nil, err
}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0526e646e..d6968a6b5 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -809,7 +809,7 @@ func (r *Runtime) teardownCNI(ctr *Container) error {
requestedMAC = ctr.config.StaticMAC
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ContainerNetworkDescriptions{})
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ctr.state.NetInterfaceDescriptions)
if err := r.netPlugin.TearDownPod(podNetwork); err != nil {
return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID())
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index de7630c06..ef5f6fb0c 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -28,6 +28,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/logs"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/rootless"
@@ -112,9 +113,11 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
// TODO: probe OCI runtime for feature and enable automatically if
// available.
- runtime.supportsJSON = supportsJSON[name]
- runtime.supportsNoCgroups = supportsNoCgroups[name]
- runtime.supportsKVM = supportsKVM[name]
+
+ base := filepath.Base(name)
+ runtime.supportsJSON = supportsJSON[base]
+ runtime.supportsNoCgroups = supportsNoCgroups[base]
+ runtime.supportsKVM = supportsKVM[base]
foundPath := false
for _, path := range paths {
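The switch to filepath.Base matters when the runtime is configured by absolute path: the capability maps are keyed by short runtime names, so the old lookups always missed in that case. A one-line illustration (the path is an example):

    base := filepath.Base("/usr/bin/crun") // "crun", which matches keys such as supportsJSON["crun"]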
@@ -837,16 +840,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
- // Check if the runtime implements checkpointing. Currently only
- // runc's checkpoint/restore implementation is supported.
- cmd := exec.Command(r.path, "checkpoint", "--help")
- if err := cmd.Start(); err != nil {
- return false
- }
- if err := cmd.Wait(); err == nil {
- return true
- }
- return false
+ return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
}
// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error
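CRRuntimeSupportsCheckpointRestore replaces the inline probe deleted above; assuming the shared helper in pkg/checkpoint/crutils keeps the same approach, it boils down to something like this sketch:

    // Probe whether the OCI runtime knows the "checkpoint" subcommand.
    func runtimeSupportsCheckpoint(runtimePath string) bool {
        cmd := exec.Command(runtimePath, "checkpoint", "--help")
        return cmd.Run() == nil // Run combines the Start/Wait pair the old code used
    }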
diff --git a/libpod/options.go b/libpod/options.go
index 6344e1acc..48888a2f2 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -64,15 +64,22 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
setField = true
}
+ graphDriverChanged := false
if config.GraphDriverName != "" {
rt.storageConfig.GraphDriverName = config.GraphDriverName
rt.storageSet.GraphDriverNameSet = true
setField = true
+ graphDriverChanged = true
}
if config.GraphDriverOptions != nil {
- rt.storageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
- copy(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ if graphDriverChanged {
+ rt.storageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
+ copy(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ } else {
+ // append new options after what is specified in the config files
+ rt.storageConfig.GraphDriverOptions = append(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions...)
+ }
setField = true
}
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index e97985180..df690e914 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -265,7 +265,7 @@ func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container
}
logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName)
newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil,
- image.SigningOptions{}, nil, util.PullImageMissing)
+ image.SigningOptions{}, nil, util.PullImageMissing, nil)
if err != nil {
return nil, err
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 8bf862bf2..19690d79b 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -22,7 +22,6 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -74,8 +73,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
}
// RenameContainer renames the given container.
-// The given container object will be rendered unusable, and a new, renamed
-// Container will be returned.
+// On success, the given container is returned with its name updated.
func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
ctr.lock.Lock()
defer ctr.lock.Unlock()
@@ -88,26 +86,6 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
return nil, define.RegexError
}
- // Check if the name is available.
- // This is *100% NOT ATOMIC* so any failures in-flight will do
- // *VERY BAD THINGS* to the state. So we have to try and catch all we
- // can before starting.
- if _, err := r.state.LookupContainerID(newName); err == nil {
- return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
- }
- if _, err := r.state.LookupPod(newName); err == nil {
- return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
- }
-
- // TODO: Investigate if it is possible to remove this limitation.
- depCtrs, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return nil, err
- }
- if len(depCtrs) > 0 {
- return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
- }
-
// We need to pull an updated config, in case another rename fired and
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
@@ -116,95 +94,33 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
}
ctr.config = newConf
- // TODO: This is going to fail if we have active exec sessions, too.
- // Investigate fixing that at a later date.
-
- var pod *Pod
- if ctr.config.Pod != "" {
- tmpPod, err := r.state.Pod(ctr.config.Pod)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
- }
- pod = tmpPod
- // Lock pod to ensure it's not removed while we're working
- pod.lock.Lock()
- defer pod.lock.Unlock()
- }
-
- // Lock all volumes to ensure they are not removed while we're working
- volsLocked := make(map[string]bool)
- for _, namedVol := range ctr.config.NamedVolumes {
- if volsLocked[namedVol.Name] {
- continue
- }
- vol, err := r.state.Volume(namedVol.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
- }
-
- volsLocked[vol.Name()] = true
- vol.lock.Lock()
- defer vol.lock.Unlock()
- }
-
logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
- // Step 1: remove the old container.
- if pod != nil {
- if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- } else {
- if err := r.state.RemoveContainer(ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- }
-
- // Step 2: Make a new container based on the old one.
- // TODO: Should we deep-copy the container config and state, to be safe?
- newCtr := new(Container)
- newCtr.config = ctr.config
- newCtr.state = ctr.state
- newCtr.lock = ctr.lock
- newCtr.ociRuntime = ctr.ociRuntime
- newCtr.runtime = r
- newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
- newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
- newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
- newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
-
- newCtr.valid = true
- newCtr.config.Name = newName
-
- // Step 3: Add that new container to the DB
- if pod != nil {
- if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- } else {
- if err := r.state.AddContainer(newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- }
+ // Step 1: Alter the config. Save the old name; we need it to rewrite
+ // the config.
+ oldName := ctr.config.Name
+ ctr.config.Name = newName
- // Step 4: Save the new container, to force the state to be written to
- // the DB. This may not be necessary, depending on DB implementation,
- // but let's do it to be safe.
- if err := newCtr.save(); err != nil {
- return nil, err
+ // Step 2: rewrite the old container's config in the DB.
+ if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil {
+ // Assume the rename failed.
+ // Set the config back to the old name so it reflects what is
+ // actually present in the DB.
+ ctr.config.Name = oldName
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
}
- // Step 5: rename the container in c/storage.
+ // Step 3: rename the container in c/storage.
// This can fail if the name is already in use by a non-Podman
// container. This puts us in a bad spot - we've already renamed the
// container in Podman. We can swap the order, but then we have the
// opposite problem. Atomicity is a real problem here, with no easy
// solution.
- if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil {
return nil, err
}
- return newCtr, nil
+ return ctr, nil
}
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
@@ -262,10 +178,6 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
}
func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (*Container, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
-
ctr, err := r.initContainerVariables(rSpec, nil)
if err != nil {
return nil, errors.Wrapf(err, "error initializing container variables")
@@ -555,10 +467,6 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
// infra container protections, and *not* remove from the database (as pod
// remove will handle that).
func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
-
if !c.valid {
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
@@ -806,7 +714,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
id, err := r.state.LookupContainerID(idOrName)
if err != nil {
- return "", errors.Wrapf(err, "failed to find container %q in state", idOrName)
+ return "", err
}
// Begin by trying a normal removal. Valid containers will be removed normally.
@@ -836,7 +744,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return id, err
}
if !exists {
- return id, errors.Wrapf(err, "failed to find container ID %q for eviction", id)
+ return id, err
}
// Re-create a container struct for removal purposes
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 6e1105b9e..90b11f8ca 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -316,7 +316,8 @@ func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io
} {
src, err := referenceFn()
if err == nil && src != nil {
- if newImages, err := r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer); err == nil {
+ newImages, err := r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ if err == nil {
return getImageNames(newImages), nil
}
saveErr = err
@@ -325,6 +326,15 @@ func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io
return "", errors.Wrapf(saveErr, "error pulling image")
}
+// RemoveImageFromStorage goes directly to storage and attempts to remove
+// the specified image. This is dangerous and should only be done if libpod
+// reports that the image is not known. This call is useful if you have a corrupted
+// image that was never fully added to the libpod database.
+func (r *Runtime) RemoveImageFromStorage(id string) error {
+ _, err := r.store.DeleteImage(id, true)
+ return err
+}
+
func getImageNames(images []*image.Image) string {
var names string
for i := range images {
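A hedged sketch of how RemoveImageFromStorage is meant to be used: only after libpod itself cannot resolve the image does a caller fall back to deleting it straight from c/storage. The surrounding lookup and error handling here are illustrative, not part of this patch:

    if _, err := runtime.ImageRuntime().NewFromLocal(idOrName); err != nil {
        // Unknown to the libpod database, but possibly still present in storage.
        if storageErr := runtime.RemoveImageFromStorage(idOrName); storageErr != nil {
            return storageErr
        }
    }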
diff --git a/libpod/runtime_img_test.go b/libpod/runtime_img_test.go
index 7d6390c85..c25f3f08c 100644
--- a/libpod/runtime_img_test.go
+++ b/libpod/runtime_img_test.go
@@ -37,7 +37,7 @@ func TestGetRegistries(t *testing.T) {
registryPath, err := createTmpFile([]byte(registry))
assert.NoError(t, err)
defer os.Remove(registryPath)
- os.Setenv("REGISTRIES_CONFIG_PATH", registryPath)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", registryPath)
registries, err := sysreg.GetRegistries()
assert.NoError(t, err)
assert.True(t, reflect.DeepEqual(registries, []string{"one"}))
@@ -46,7 +46,7 @@ func TestGetRegistries(t *testing.T) {
func TestGetInsecureRegistries(t *testing.T) {
registryPath, err := createTmpFile([]byte(registry))
assert.NoError(t, err)
- os.Setenv("REGISTRIES_CONFIG_PATH", registryPath)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", registryPath)
defer os.Remove(registryPath)
registries, err := sysreg.GetInsecureRegistries()
assert.NoError(t, err)
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 000029fa4..0a09e40ea 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -216,7 +216,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container,
if img == "" {
img = r.config.Engine.InfraImage
}
- newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil)
if err != nil {
return nil, err
}
diff --git a/libpod/state.go b/libpod/state.go
index 074d21740..4b711bae9 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -155,6 +155,19 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // This is a more limited version of RewriteContainerConfig, though it
+ // comes with the added ability to alter a container's name. In exchange
+ // it loses the ability to manipulate the container's locks.
+ // It is not intended to be as restrictive as RewriteContainerConfig, in
+ // that we allow it to be run while other Podman processes are running,
+ // and without holding the alive lock.
+ // Container ID and pod membership still *ABSOLUTELY CANNOT* be altered.
+ // Also, you cannot change a container's dependencies - shared namespace
+ // containers or generic dependencies - at present. This is
+ // theoretically possible but not yet implemented.
+ // If newName is not "" the container will be renamed to the new name.
+ // The oldName parameter is only required if newName is given.
+ SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error
// PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
diff --git a/libpod/storage.go b/libpod/storage.go
index 418eb3151..4aa42dc8e 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -67,10 +66,6 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in an Image object in the future.
func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (_ ContainerInfo, retErr error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "createContainerStorage")
- span.SetTag("type", "storageService")
- defer span.Finish()
-
var imageConfig *v1.Image
if imageName != "" {
var ref types.ImageReference
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index 459fb28f8..cc8daf55c 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,9 +1,9 @@
{
"url": "https://github.com/nixos/nixpkgs",
- "rev": "30c2fb65feaf1068b1c413a0b75470afd351c291",
- "date": "2021-01-28T21:27:34-05:00",
- "path": "/nix/store/zk71rlw37vg9hqc5j0vqi9x8qzb2ir0m-nixpkgs",
- "sha256": "0b1y1lgzbagpgh9cvi9szkm162laifz0q2ss4pibns3j3gqpf5gl",
+ "rev": "f38b9b258f3f4db5ecf7dd27a7d5b48f23202843",
+ "date": "2021-03-07T14:22:16+01:00",
+ "path": "/nix/store/df3v1b2qfsbnsd6fwaw4787qdy5rcxkc-nixpkgs",
+ "sha256": "1dbi7rjyfkv3rw6zqwbc6jknbdgyv16cd8zgcpq5gdj0mwnp9b13",
"fetchSubmodules": false,
"deepClone": false,
"leaveDotGit": false
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 971b6aa50..d3277b815 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -76,7 +76,12 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
return
}
if len(report) > 0 && report[0].Err != nil {
- utils.InternalServerError(w, report[0].Err)
+ err = report[0].Err
+ if errors.Cause(err) == define.ErrNoSuchCtr {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ utils.InternalServerError(w, err)
return
}
@@ -307,6 +312,34 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
}
}
+ portMappings, err := l.PortMappings()
+ if err != nil {
+ return nil, err
+ }
+
+ ports := make([]types.Port, len(portMappings))
+ for idx, portMapping := range portMappings {
+ ports[idx] = types.Port{
+ IP: portMapping.HostIP,
+ PrivatePort: uint16(portMapping.ContainerPort),
+ PublicPort: uint16(portMapping.HostPort),
+ Type: portMapping.Protocol,
+ }
+ }
+ inspect, err := l.Inspect(false)
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := json.Marshal(inspect.NetworkSettings)
+ if err != nil {
+ return nil, err
+ }
+ networkSettings := types.SummaryNetworkSettings{}
+ if err := json.Unmarshal(n, &networkSettings); err != nil {
+ return nil, err
+ }
+
return &handlers.Container{Container: types.Container{
ID: l.ID(),
Names: []string{fmt.Sprintf("/%s", l.Name())},
@@ -314,7 +347,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
ImageID: imageID,
Command: strings.Join(l.Command(), " "),
Created: l.CreatedTime().Unix(),
- Ports: nil,
+ Ports: ports,
SizeRw: sizeRW,
SizeRootFs: sizeRootFs,
Labels: l.Labels(),
@@ -324,7 +357,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
NetworkMode string `json:",omitempty"`
}{
"host"},
- NetworkSettings: nil,
+ NetworkSettings: &networkSettings,
Mounts: nil,
},
ContainerCreateConfig: types.ContainerCreateConfig{},
diff --git a/pkg/api/handlers/compat/containers_stop.go b/pkg/api/handlers/compat/containers_stop.go
index 0526865b9..3ae223693 100644
--- a/pkg/api/handlers/compat/containers_stop.go
+++ b/pkg/api/handlers/compat/containers_stop.go
@@ -39,11 +39,11 @@ func StopContainer(w http.ResponseWriter, r *http.Request) {
Ignore: query.Ignore,
}
if utils.IsLibpodRequest(r) {
- if query.LibpodTimeout > 0 {
+ if _, found := r.URL.Query()["timeout"]; found {
options.Timeout = &query.LibpodTimeout
}
} else {
- if query.DockerTimeout > 0 {
+ if _, found := r.URL.Query()["t"]; found {
options.Timeout = &query.DockerTimeout
}
}
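The behavioral difference shows up with a zero timeout, which the old "> 0" check silently discarded; a hypothetical compat client call that is now honored (host, port, and container name are placeholders):

    // t=0 now means "stop immediately" instead of falling back to the default grace period.
    resp, err := http.Post("http://localhost:8080/v1.40/containers/web/stop?t=0", "", nil)
    if err == nil {
        resp.Body.Close()
    }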
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 1a4dd939e..e5caa9ea5 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -1,6 +1,7 @@
package compat
import (
+ "context"
"encoding/json"
"fmt"
"io"
@@ -11,11 +12,13 @@ import (
"github.com/containers/buildah"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod"
image2 "github.com/containers/podman/v3/libpod/image"
"github.com/containers/podman/v3/pkg/api/handlers"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/auth"
+ "github.com/containers/podman/v3/pkg/channel"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/util"
"github.com/gorilla/schema"
@@ -236,33 +239,103 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
if sys := runtime.SystemContext(); sys != nil {
registryOpts.DockerCertPath = sys.DockerCertPath
}
- img, err := runtime.ImageRuntime().New(r.Context(),
- fromImage,
- "", // signature policy
- authfile,
- nil, // writer
- &registryOpts,
- image2.SigningOptions{},
- nil, // label
- util.PullImageAlways,
- )
- if err != nil {
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
- return
+
+ stderr := channel.NewWriter(make(chan []byte))
+ defer stderr.Close()
+
+ progress := make(chan types.ProgressProperties)
+
+ var img string
+ runCtx, cancel := context.WithCancel(context.Background())
+ go func() {
+ defer cancel()
+
+ newImage, err := runtime.ImageRuntime().New(
+ runCtx,
+ fromImage,
+ "", // signature policy
+ authfile,
+ nil, // writer
+ &registryOpts,
+ image2.SigningOptions{},
+ nil, // label
+ util.PullImageAlways,
+ progress)
+ if err != nil {
+ stderr.Write([]byte(err.Error() + "\n"))
+ } else {
+ img = newImage.ID()
+ }
+ }()
+
+ flush := func() {
+ if flusher, ok := w.(http.Flusher); ok {
+ flusher.Flush()
+ }
}
- // Success
- utils.WriteResponse(w, http.StatusOK, struct {
- Status string `json:"status"`
- Error string `json:"error,omitempty"`
- Progress string `json:"progress"`
- ProgressDetail map[string]string `json:"progressDetail"`
- Id string `json:"id"` // nolint
- }{
- Status: fmt.Sprintf("pulling image (%s) from %s (Download complete)", img.Tag, strings.Join(img.Names(), ", ")),
- ProgressDetail: map[string]string{},
- Id: img.ID(),
- })
+ w.WriteHeader(http.StatusOK)
+ w.Header().Add("Content-Type", "application/json")
+ flush()
+
+ enc := json.NewEncoder(w)
+ enc.SetEscapeHTML(true)
+ var failed bool
+
+loop: // break out of for/select infinite loop
+ for {
+ var report struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress struct {
+ Current uint64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ } `json:"progressDetail,omitempty"`
+ Error string `json:"error,omitempty"`
+ Id string `json:"id,omitempty"` // nolint
+ }
+
+ select {
+ case e := <-progress:
+ switch e.Event {
+ case types.ProgressEventNewArtifact:
+ report.Status = "Pulling fs layer"
+ case types.ProgressEventRead:
+ report.Status = "Downloading"
+ report.Progress.Current = e.Offset
+ report.Progress.Total = e.Artifact.Size
+ case types.ProgressEventSkipped:
+ report.Status = "Already exists"
+ case types.ProgressEventDone:
+ report.Status = "Download complete"
+ }
+ report.Id = e.Artifact.Digest.Encoded()[0:12]
+ if err := enc.Encode(report); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-stderr.Chan():
+ failed = true
+ report.Error = string(e)
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ case <-runCtx.Done():
+ if !failed {
+ report.Status = "Pull complete"
+ report.Id = img[0:12]
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ }
+ break loop // break out of for/select infinite loop
+ case <-r.Context().Done():
+ // Client has closed connection
+ break loop // break out of for/select infinite loop
+ }
+ }
}
func GetImage(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 2c70352e0..7751b91a7 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -77,6 +77,9 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Devices string `schema:"devices"`
Dockerfile string `schema:"dockerfile"`
DropCapabilities string `schema:"dropcaps"`
+ DNSServers string `schema:"dnsservers"`
+ DNSOptions string `schema:"dnsoptions"`
+ DNSSearch string `schema:"dnssearch"`
Excludes string `schema:"excludes"`
ForceRm bool `schema:"forcerm"`
From string `schema:"from"`
@@ -104,6 +107,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Squash bool `schema:"squash"`
Tag []string `schema:"t"`
Target string `schema:"target"`
+ Timestamp int64 `schema:"timestamp"`
}{
Dockerfile: "Dockerfile",
Registry: "docker.io",
@@ -159,6 +163,36 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
devices = m
}
+ var dnsservers = []string{}
+ if _, found := r.URL.Query()["dnsservers"]; found {
+ var m = []string{}
+ if err := json.Unmarshal([]byte(query.DNSServers), &m); err != nil {
+ utils.BadRequest(w, "dnsservers", query.DNSServers, err)
+ return
+ }
+ dnsservers = m
+ }
+
+ var dnsoptions = []string{}
+ if _, found := r.URL.Query()["dnsoptions"]; found {
+ var m = []string{}
+ if err := json.Unmarshal([]byte(query.DNSOptions), &m); err != nil {
+ utils.BadRequest(w, "dnsoptions", query.DNSOptions, err)
+ return
+ }
+ dnsoptions = m
+ }
+
+ var dnssearch = []string{}
+ if _, found := r.URL.Query()["dnssearch"]; found {
+ var m = []string{}
+ if err := json.Unmarshal([]byte(query.DNSSearch), &m); err != nil {
+ utils.BadRequest(w, "dnssearches", query.DNSSearch, err)
+ return
+ }
+ dnssearch = m
+ }
+
var output string
if len(query.Tag) > 0 {
output = query.Tag[0]
@@ -221,9 +255,17 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
// convert label formats
var labels = []string{}
if _, found := r.URL.Query()["labels"]; found {
- if err := json.Unmarshal([]byte(query.Labels), &labels); err != nil {
- utils.BadRequest(w, "labels", query.Labels, err)
- return
+ makeLabels := make(map[string]string)
+ err := json.Unmarshal([]byte(query.Labels), &makeLabels)
+ if err == nil {
+ for k, v := range makeLabels {
+ labels = append(labels, k+"="+v)
+ }
+ } else {
+ if err := json.Unmarshal([]byte(query.Labels), &labels); err != nil {
+ utils.BadRequest(w, "labels", query.Labels, err)
+ return
+ }
}
}
jobs := 1
@@ -276,6 +318,9 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
CPUQuota: query.CpuQuota,
CPUShares: query.CpuShares,
CPUSetCPUs: query.CpuSetCpus,
+ DNSServers: dnsservers,
+ DNSOptions: dnsoptions,
+ DNSSearch: dnssearch,
HTTPProxy: query.HTTPProxy,
Memory: query.Memory,
MemorySwap: query.MemSwap,
@@ -318,6 +363,11 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Target: query.Target,
}
+ if _, found := r.URL.Query()["timestamp"]; found {
+ ts := time.Unix(query.Timestamp, 0)
+ buildOptions.Timestamp = &ts
+ }
+
runCtx, cancel := context.WithCancel(context.Background())
var imageID string
go func() {
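The labels change is about tolerating both encodings clients actually send; both of these illustrative payloads now end up as the same KEY=VALUE slice:

    var asMap map[string]string
    _ = json.Unmarshal([]byte(`{"maintainer":"qa"}`), &asMap) // Docker CLI sends a JSON object...
    var asList []string
    _ = json.Unmarshal([]byte(`["maintainer=qa"]`), &asList)  // ...older callers send an array, still accepted as the fallback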
diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go
index 4f613338f..db02af445 100644
--- a/pkg/api/handlers/compat/images_push.go
+++ b/pkg/api/handlers/compat/images_push.go
@@ -1,6 +1,8 @@
package compat
import (
+ "context"
+ "encoding/json"
"fmt"
"io/ioutil"
"net/http"
@@ -10,11 +12,14 @@ import (
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/auth"
+ "github.com/containers/podman/v3/pkg/channel"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/domain/infra/abi"
"github.com/containers/storage"
+ "github.com/docker/docker/pkg/jsonmessage"
"github.com/gorilla/schema"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// PushImage is the handler for the compat http endpoint for pushing images.
@@ -82,6 +87,8 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
Password: password,
Username: username,
DigestFile: digestFile.Name(),
+ Quiet: true,
+ Progress: make(chan types.ProgressProperties),
}
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
@@ -94,31 +101,103 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
destination = imageName
}
- if err := imageEngine.Push(r.Context(), imageName, destination, options); err != nil {
- if errors.Cause(err) != storage.ErrImageUnknown {
- utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName))
- return
+ errorWriter := channel.NewWriter(make(chan []byte))
+ defer errorWriter.Close()
+
+ statusWriter := channel.NewWriter(make(chan []byte))
+ defer statusWriter.Close()
+
+ runCtx, cancel := context.WithCancel(context.Background())
+ var failed bool
+
+ go func() {
+ defer cancel()
+
+ statusWriter.Write([]byte(fmt.Sprintf("The push refers to repository [%s]", imageName)))
+
+ err := imageEngine.Push(runCtx, imageName, destination, options)
+ if err != nil {
+ if errors.Cause(err) != storage.ErrImageUnknown {
+ errorWriter.Write([]byte("An image does not exist locally with the tag: " + imageName))
+ } else {
+ errorWriter.Write([]byte(err.Error()))
+ }
}
+ }()
- utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", imageName))
- return
+ flush := func() {
+ if flusher, ok := w.(http.Flusher); ok {
+ flusher.Flush()
+ }
}
- digestBytes, err := ioutil.ReadAll(digestFile)
- if err != nil {
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to read digest tmp file"))
- return
- }
+ w.WriteHeader(http.StatusOK)
+ w.Header().Add("Content-Type", "application/json")
+ flush()
- tag := query.Tag
- if tag == "" {
- tag = "latest"
- }
- respData := struct {
- Status string `json:"status"`
- }{
- Status: fmt.Sprintf("%s: digest: %s size: null", tag, string(digestBytes)),
- }
+ enc := json.NewEncoder(w)
+ enc.SetEscapeHTML(true)
+
+loop: // break out of for/select infinite loop
+ for {
+ var report jsonmessage.JSONMessage
- utils.WriteJSON(w, http.StatusOK, &respData)
+ select {
+ case e := <-options.Progress:
+ switch e.Event {
+ case types.ProgressEventNewArtifact:
+ report.Status = "Preparing"
+ case types.ProgressEventRead:
+ report.Status = "Pushing"
+ report.Progress = &jsonmessage.JSONProgress{
+ Current: int64(e.Offset),
+ Total: e.Artifact.Size,
+ }
+ case types.ProgressEventSkipped:
+ report.Status = "Layer already exists"
+ case types.ProgressEventDone:
+ report.Status = "Pushed"
+ }
+ report.ID = e.Artifact.Digest.Encoded()[0:12]
+ if err := enc.Encode(report); err != nil {
+ errorWriter.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-statusWriter.Chan():
+ report.Status = string(e)
+ if err := enc.Encode(report); err != nil {
+ errorWriter.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-errorWriter.Chan():
+ failed = true
+ report.Error = &jsonmessage.JSONError{
+ Message: string(e),
+ }
+ report.ErrorMessage = string(e)
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ case <-runCtx.Done():
+ if !failed {
+ digestBytes, err := ioutil.ReadAll(digestFile)
+ if err == nil {
+ tag := query.Tag
+ if tag == "" {
+ tag = "latest"
+ }
+ report.Status = fmt.Sprintf("%s: digest: %s", tag, string(digestBytes))
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ }
+ }
+ break loop // break out of for/select infinite loop
+ case <-r.Context().Done():
+ // Client has closed connection
+ break loop // break out of for/select infinite loop
+ }
+ }
}
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 1a04b4289..28e90ac28 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -180,16 +180,18 @@ func findPluginByName(plugins []*libcni.NetworkConfig, pluginType string) ([]byt
func ListNetworks(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- decoder := r.Context().Value("decoder").(*schema.Decoder)
- query := struct {
- Filters map[string][]string `schema:"filters"`
- }{
- // override any golang type defaults
- }
- if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ filters, err := filtersFromRequest(r)
+ if err != nil {
utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
return
}
+ filterMap := map[string][]string{}
+ for _, filter := range filters {
+ split := strings.SplitN(filter, "=", 2)
+ if len(split) > 1 {
+ filterMap[split[0]] = append(filterMap[split[0]], split[1])
+ }
+ }
config, err := runtime.GetConfig()
if err != nil {
utils.InternalServerError(w, err)
@@ -205,7 +207,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
reports := []*types.NetworkResource{}
logrus.Debugf("netNames: %q", strings.Join(netNames, ", "))
for _, name := range netNames {
- report, err := getNetworkResourceByNameOrID(name, runtime, query.Filters)
+ report, err := getNetworkResourceByNameOrID(name, runtime, filterMap)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/compat/secrets.go b/pkg/api/handlers/compat/secrets.go
index c5ee8c324..86e3887a4 100644
--- a/pkg/api/handlers/compat/secrets.go
+++ b/pkg/api/handlers/compat/secrets.go
@@ -40,7 +40,21 @@ func ListSecrets(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- utils.WriteResponse(w, http.StatusOK, reports)
+ if utils.IsLibpodRequest(r) {
+ utils.WriteResponse(w, http.StatusOK, reports)
+ return
+ }
+ // Docker compat expects a version field that increments when the secret is updated
+ // We currently can't update a secret, so we default the version to 1
+ compatReports := make([]entities.SecretInfoReportCompat, 0, len(reports))
+ for _, report := range reports {
+ compatRep := entities.SecretInfoReportCompat{
+ SecretInfoReport: *report,
+ Version: entities.SecretVersion{Index: 1},
+ }
+ compatReports = append(compatReports, compatRep)
+ }
+ utils.WriteResponse(w, http.StatusOK, compatReports)
}
func InspectSecret(w http.ResponseWriter, r *http.Request) {
@@ -59,7 +73,21 @@ func InspectSecret(w http.ResponseWriter, r *http.Request) {
utils.SecretNotFound(w, name, errs[0])
return
}
- utils.WriteResponse(w, http.StatusOK, reports[0])
+ if len(reports) < 1 {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if utils.IsLibpodRequest(r) {
+ utils.WriteResponse(w, http.StatusOK, reports[0])
+ return
+ }
+ // Docker compat expects a version field that increments when the secret is updated
+ // We currently can't update a secret, so we default the version to 1
+ compatReport := entities.SecretInfoReportCompat{
+ SecretInfoReport: *reports[0],
+ Version: entities.SecretVersion{Index: 1},
+ }
+ utils.WriteResponse(w, http.StatusOK, compatReport)
}
func RemoveSecret(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go
index d90a892c1..fae147440 100644
--- a/pkg/api/handlers/compat/version.go
+++ b/pkg/api/handlers/compat/version.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/domain/entities"
+ "github.com/containers/podman/v3/version"
docker "github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
@@ -35,20 +36,20 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) {
Name: "Podman Engine",
Version: versionInfo.Version,
Details: map[string]string{
- "APIVersion": utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String(),
+ "APIVersion": version.APIVersion[version.Libpod][version.CurrentAPI].String(),
"Arch": goRuntime.GOARCH,
"BuildTime": time.Unix(versionInfo.Built, 0).Format(time.RFC3339),
"Experimental": "true",
"GitCommit": versionInfo.GitCommit,
"GoVersion": versionInfo.GoVersion,
"KernelVersion": infoData.Host.Kernel,
- "MinAPIVersion": utils.APIVersion[utils.LibpodTree][utils.MinimalAPIVersion].String(),
+ "MinAPIVersion": version.APIVersion[version.Libpod][version.MinimalAPI].String(),
"Os": goRuntime.GOOS,
},
}}
- apiVersion := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion]
- minVersion := utils.APIVersion[utils.CompatTree][utils.MinimalAPIVersion]
+ apiVersion := version.APIVersion[version.Compat][version.CurrentAPI]
+ minVersion := version.APIVersion[version.Compat][version.MinimalAPI]
utils.WriteResponse(w, http.StatusOK, entities.ComponentVersion{
Version: docker.Version{
diff --git a/pkg/api/handlers/libpod/copy.go b/pkg/api/handlers/libpod/copy.go
deleted file mode 100644
index 4b345bacc..000000000
--- a/pkg/api/handlers/libpod/copy.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package libpod
-
-import (
- "net/http"
-
- "github.com/containers/podman/v3/pkg/api/handlers/utils"
- "github.com/pkg/errors"
-)
-
-func Archive(w http.ResponseWriter, r *http.Request) {
- utils.Error(w, "not implemented", http.StatusNotImplemented, errors.New("not implemented"))
-}
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 1a2483784..83fe23621 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -451,6 +451,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
Password: password,
Format: query.Format,
All: query.All,
+ Quiet: true,
}
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go
index c8b777be4..e2e4b53b4 100644
--- a/pkg/api/handlers/libpod/images_pull.go
+++ b/pkg/api/handlers/libpod/images_pull.go
@@ -136,7 +136,8 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
&dockerRegistryOptions,
image.SigningOptions{},
nil,
- util.PullImageAlways)
+ util.PullImageAlways,
+ nil)
if err != nil {
stderr.Write([]byte(err.Error() + "\n"))
} else {
diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go
index b3c674788..7625f9546 100644
--- a/pkg/api/handlers/utils/handler.go
+++ b/pkg/api/handlers/utils/handler.go
@@ -10,49 +10,14 @@ import (
"unsafe"
"github.com/blang/semver"
+ "github.com/containers/podman/v3/version"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-type (
- // VersionTree determines which API endpoint tree for version
- VersionTree int
- // VersionLevel determines which API level, current or something from the past
- VersionLevel int
-)
-
-const (
- // LibpodTree supports Libpod endpoints
- LibpodTree = VersionTree(iota)
- // CompatTree supports Libpod endpoints
- CompatTree
-
- // CurrentAPIVersion announces what is the current API level
- CurrentAPIVersion = VersionLevel(iota)
- // MinimalAPIVersion announces what is the oldest API level supported
- MinimalAPIVersion
-)
-
var (
- // See https://docs.docker.com/engine/api/v1.40/
- // libpod compat handlers are expected to honor docker API versions
-
- // APIVersion provides the current and minimal API versions for compat and libpod endpoint trees
- // Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow
- // clients to shop for the Version they wish to support
- APIVersion = map[VersionTree]map[VersionLevel]semver.Version{
- LibpodTree: {
- CurrentAPIVersion: semver.MustParse("3.0.0"),
- MinimalAPIVersion: semver.MustParse("3.0.0"),
- },
- CompatTree: {
- CurrentAPIVersion: semver.MustParse("1.40.0"),
- MinimalAPIVersion: semver.MustParse("1.24.0"),
- },
- }
-
// ErrVersionNotGiven returned when version not given by client
ErrVersionNotGiven = errors.New("version not given in URL path")
// ErrVersionNotSupported returned when given version is too old
@@ -98,14 +63,14 @@ func SupportedVersion(r *http.Request, condition string) (semver.Version, error)
// SupportedVersionWithDefaults validates that the version provided by client valid is supported by server
// minimal API version <= client path version <= maximum API version focused on the endpoint tree from URL
func SupportedVersionWithDefaults(r *http.Request) (semver.Version, error) {
- tree := CompatTree
+ tree := version.Compat
if IsLibpodRequest(r) {
- tree = LibpodTree
+ tree = version.Libpod
}
return SupportedVersion(r,
- fmt.Sprintf(">=%s <=%s", APIVersion[tree][MinimalAPIVersion].String(),
- APIVersion[tree][CurrentAPIVersion].String()))
+ fmt.Sprintf(">=%s <=%s", version.APIVersion[tree][version.MinimalAPI].String(),
+ version.APIVersion[tree][version.CurrentAPI].String()))
}
// WriteResponse encodes the given value as JSON or string and renders it for http client
diff --git a/pkg/api/handlers/utils/handler_test.go b/pkg/api/handlers/utils/handler_test.go
index d9fd22b80..18a1d2678 100644
--- a/pkg/api/handlers/utils/handler_test.go
+++ b/pkg/api/handlers/utils/handler_test.go
@@ -7,17 +7,18 @@ import (
"net/http/httptest"
"testing"
+ "github.com/containers/podman/v3/version"
"github.com/gorilla/mux"
)
func TestSupportedVersion(t *testing.T) {
req, err := http.NewRequest("GET",
- fmt.Sprintf("/v%s/libpod/testing/versions", APIVersion[LibpodTree][CurrentAPIVersion]),
+ fmt.Sprintf("/v%s/libpod/testing/versions", version.APIVersion[version.Libpod][version.CurrentAPI]),
nil)
if err != nil {
t.Fatal(err)
}
- req = mux.SetURLVars(req, map[string]string{"version": APIVersion[LibpodTree][CurrentAPIVersion].String()})
+ req = mux.SetURLVars(req, map[string]string{"version": version.APIVersion[version.Libpod][version.CurrentAPI].String()})
rr := httptest.NewRecorder()
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go
index e7bf94fc6..28b8706a8 100644
--- a/pkg/api/server/handler_api.go
+++ b/pkg/api/server/handler_api.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/auth"
+ "github.com/containers/podman/v3/version"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
)
@@ -55,10 +56,10 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc {
c = context.WithValue(c, "idletracker", s.idleTracker) // nolint
r = r.WithContext(c)
- cv := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion]
+ cv := version.APIVersion[version.Compat][version.CurrentAPI]
w.Header().Set("API-Version", fmt.Sprintf("%d.%d", cv.Major, cv.Minor))
- lv := utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String()
+ lv := version.APIVersion[version.Libpod][version.CurrentAPI].String()
w.Header().Set("Libpod-API-Version", lv)
w.Header().Set("Server", "Libpod/"+lv+" ("+runtime.GOOS+")")
@@ -72,5 +73,5 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc {
// VersionedPath prepends the version parsing code
// any handler may override this default when registering URL(s)
func VersionedPath(p string) string {
- return "/v{version:[0-9][0-9.]*}" + p
+ return "/v{version:[0-9][0-9A-Za-z.-]*}" + p
}
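The widened VersionedPath pattern also accepts pre-release style versions such as "3.0.0-dev". gorilla/mux compiles the {version:...} fragment into a regular expression, so the effect can be sketched with the standard regexp package (the paths below are illustrative only):

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// Old pattern: only digits and dots after the leading digit.
		oldRe := regexp.MustCompile(`^/v[0-9][0-9.]*/libpod/info$`)
		// New pattern from VersionedPath above: also letters and dashes.
		newRe := regexp.MustCompile(`^/v[0-9][0-9A-Za-z.-]*/libpod/info$`)

		for _, p := range []string{"/v3.0.0/libpod/info", "/v3.0.0-dev/libpod/info"} {
			fmt.Printf("%-26s old=%v new=%v\n", p, oldRe.MatchString(p), newRe.MatchString(p))
		}
	}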
diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go
index 2a5cfba0b..2ac126644 100644
--- a/pkg/api/server/register_archive.go
+++ b/pkg/api/server/register_archive.go
@@ -91,7 +91,7 @@ func (s *APIServer) registerArchiveHandlers(r *mux.Router) error {
Libpod
*/
- // swagger:operation POST /libpod/containers/{name}/archive libpod libpodPutArchive
+ // swagger:operation PUT /libpod/containers/{name}/archive libpod libpodPutArchive
// ---
// summary: Copy files into a container
// description: Copy a tar archive of files into a container
diff --git a/pkg/api/server/register_pods.go b/pkg/api/server/register_pods.go
index 949bf80e2..c66cc48ff 100644
--- a/pkg/api/server/register_pods.go
+++ b/pkg/api/server/register_pods.go
@@ -36,7 +36,6 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// name: create
// description: attributes for creating a pod
// schema:
- // type: object
// $ref: "#/definitions/PodSpecGenerator"
// responses:
// 200:
diff --git a/pkg/api/server/register_secrets.go b/pkg/api/server/register_secrets.go
index 1c5f5954b..531623845 100644
--- a/pkg/api/server/register_secrets.go
+++ b/pkg/api/server/register_secrets.go
@@ -115,7 +115,7 @@ func (s *APIServer) registerSecretHandlers(r *mux.Router) error {
// parameters:
// responses:
// '200':
- // "$ref": "#/responses/SecretListResponse"
+ // "$ref": "#/responses/SecretListCompatResponse"
// '500':
// "$ref": "#/responses/InternalError"
r.Handle(VersionedPath("/secrets"), s.APIHandler(compat.ListSecrets)).Methods(http.MethodGet)
@@ -158,7 +158,7 @@ func (s *APIServer) registerSecretHandlers(r *mux.Router) error {
// - application/json
// responses:
// '200':
- // "$ref": "#/responses/SecretInspectResponse"
+ // "$ref": "#/responses/SecretInspectCompatResponse"
// '404':
// "$ref": "#/responses/NoSuchSecret"
// '500':
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 0cf51e5a6..53095c295 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -304,6 +304,7 @@ func updateImage(runtime *libpod.Runtime, name string, options Options) (*image.
image.SigningOptions{},
nil,
util.PullImageAlways,
+ nil,
)
if err != nil {
return nil, err
diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go
deleted file mode 100644
index 14f306910..000000000
--- a/pkg/bindings/bindings.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Package bindings provides golang-based access
-// to the Podman REST API. Users can then interact with API endpoints
-// to manage containers, images, pods, etc.
-//
-// This package exposes a series of methods that allow users to firstly
-// create their connection with the API endpoints. Once the connection
-// is established, users can then manage the Podman container runtime.
-package bindings
-
-import (
- "github.com/blang/semver"
-)
-
-var (
- // PTrue is a convenience variable that can be used in bindings where
- // a pointer to a bool (optional parameter) is required.
- pTrue = true
- PTrue = &pTrue
- // PFalse is a convenience variable that can be used in bindings where
- // a pointer to a bool (optional parameter) is required.
- pFalse = false
- PFalse = &pFalse
-
- // APIVersion - podman will fail to run if this value is wrong
- APIVersion = semver.MustParse("2.0.0")
-)
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index ad16498d5..21a8e7a8b 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -14,6 +14,7 @@ import (
"github.com/blang/semver"
"github.com/containers/podman/v3/pkg/terminal"
+ "github.com/containers/podman/v3/version"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -25,7 +26,7 @@ var (
BasePath = &url.URL{
Scheme: "http",
Host: "d",
- Path: "/v" + APIVersion.String() + "/libpod",
+ Path: "/v" + version.APIVersion[version.Libpod][version.CurrentAPI].String() + "/libpod",
}
)
@@ -168,15 +169,16 @@ func pingNewConnection(ctx context.Context) error {
return err
}
- switch APIVersion.Compare(versionSrv) {
+ switch version.APIVersion[version.Libpod][version.MinimalAPI].Compare(versionSrv) {
case -1, 0:
// Server's job when Client version is equal or older
return nil
case 1:
- return errors.Errorf("server API version is too old. Client %q server %q", APIVersion.String(), versionSrv.String())
+ return errors.Errorf("server API version is too old. Client %q server %q",
+ version.APIVersion[version.Libpod][version.MinimalAPI].String(), versionSrv.String())
}
}
- return errors.Errorf("ping response was %q", response.StatusCode)
+ return errors.Errorf("ping response was %d", response.StatusCode)
}
func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (Connection, error) {
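The ping check above now compares the client's minimal supported libpod API version against the version reported by the server. A small sketch of the Compare semantics the switch relies on (both values are hypothetical):

	package main

	import (
		"fmt"

		"github.com/blang/semver"
	)

	func main() {
		clientMinimal := semver.MustParse("3.0.0") // stands in for version.APIVersion[version.Libpod][version.MinimalAPI]
		serverVersion := semver.MustParse("3.1.0") // hypothetical value parsed from the /_ping headers

		// Compare returns -1, 0, or 1; the connection is accepted for -1 and 0,
		// i.e. whenever the server is at least as new as the client's minimal version.
		fmt.Println(clientMinimal.Compare(serverVersion))
	}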
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 6e16461e5..1cbd28c37 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -87,6 +87,28 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
params.Add("devices", d)
}
+ if dnsservers := options.CommonBuildOpts.DNSServers; len(dnsservers) > 0 {
+ c, err := jsoniter.MarshalToString(dnsservers)
+ if err != nil {
+ return nil, err
+ }
+ params.Add("dnsservers", c)
+ }
+ if dnsoptions := options.CommonBuildOpts.DNSOptions; len(dnsoptions) > 0 {
+ c, err := jsoniter.MarshalToString(dnsoptions)
+ if err != nil {
+ return nil, err
+ }
+ params.Add("dnsoptions", c)
+ }
+ if dnssearch := options.CommonBuildOpts.DNSSearch; len(dnssearch) > 0 {
+ c, err := jsoniter.MarshalToString(dnssearch)
+ if err != nil {
+ return nil, err
+ }
+ params.Add("dnssearch", c)
+ }
+
if caps := options.DropCapabilities; len(caps) > 0 {
c, err := jsoniter.MarshalToString(caps)
if err != nil {
@@ -185,6 +207,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if options.Squash {
params.Set("squash", "1")
}
+
+ if options.Timestamp != nil {
+ t := *options.Timestamp
+ params.Set("timestamp", strconv.FormatInt(t.Unix(), 10))
+ }
+
var (
headers map[string]string
err error
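The new DNS and timestamp options follow the encoding pattern this binding already uses: list values are JSON-encoded into a single query parameter, and the timestamp travels as Unix seconds. A self-contained sketch of that pattern (the parameter names match the hunk above, the values are made up):

	package main

	import (
		"fmt"
		"net/url"
		"strconv"
		"time"

		jsoniter "github.com/json-iterator/go"
	)

	func main() {
		params := url.Values{}

		// Each list-valued build option becomes one JSON-encoded query value.
		dnsServers := []string{"1.1.1.1", "8.8.8.8"}
		if s, err := jsoniter.MarshalToString(dnsServers); err == nil {
			params.Add("dnsservers", s)
		}

		// The build timestamp is forwarded as Unix seconds.
		ts := time.Date(2021, 2, 1, 0, 0, 0, 0, time.UTC)
		params.Set("timestamp", strconv.FormatInt(ts.Unix(), 10))

		fmt.Println(params.Encode())
	}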
diff --git a/pkg/bindings/test/attach_test.go b/pkg/bindings/test/attach_test.go
index 16090e104..fbdf18d44 100644
--- a/pkg/bindings/test/attach_test.go
+++ b/pkg/bindings/test/attach_test.go
@@ -35,7 +35,7 @@ var _ = Describe("Podman containers attach", func() {
It("can run top in container", func() {
name := "TopAttachTest"
- id, err := bt.RunTopContainer(&name, nil, nil)
+ id, err := bt.RunTopContainer(&name, nil)
Expect(err).ShouldNot(HaveOccurred())
tickTock := time.NewTimer(2 * time.Second)
diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go
index 588f38930..9bac4b620 100644
--- a/pkg/bindings/test/common_test.go
+++ b/pkg/bindings/test/common_test.go
@@ -188,14 +188,14 @@ func (b *bindingTest) restoreImageFromCache(i testImage) {
// Run a top container, optionally inside an existing pod,
// based on the alpine image
-func (b *bindingTest) RunTopContainer(containerName *string, insidePod *bool, podName *string) (string, error) {
+func (b *bindingTest) RunTopContainer(containerName *string, podName *string) (string, error) {
s := specgen.NewSpecGenerator(alpine.name, false)
s.Terminal = false
s.Command = []string{"/usr/bin/top"}
if containerName != nil {
s.Name = *containerName
}
- if insidePod != nil && podName != nil {
+ if podName != nil {
s.Pod = *podName
}
ctr, err := containers.CreateWithSpec(b.conn, s, nil)
diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go
index f2ab197ce..b0ddc7862 100644
--- a/pkg/bindings/test/containers_test.go
+++ b/pkg/bindings/test/containers_test.go
@@ -55,7 +55,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a running container by name", func() {
// Pausing by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -69,7 +69,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a running container by id", func() {
// Pausing by id should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -83,7 +83,7 @@ var _ = Describe("Podman containers ", func() {
It("podman unpause a running container by name", func() {
// Unpausing by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -99,7 +99,7 @@ var _ = Describe("Podman containers ", func() {
It("podman unpause a running container by ID", func() {
// Unpausing by ID should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Pause by name
err = containers.Pause(bt.conn, name, nil)
@@ -118,7 +118,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a paused container by name", func() {
// Pausing a paused container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -131,7 +131,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a paused container by id", func() {
// Pausing a paused container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -144,7 +144,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a stopped container by name", func() {
// Pausing a stopped container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -157,7 +157,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a stopped container by id", func() {
// Pausing a stopped container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -170,7 +170,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove a paused container by id without force", func() {
// Removing a paused container without force should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -183,7 +183,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove a paused container by id with force", func() {
// Removing a paused container with force should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -194,7 +194,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a paused container by name", func() {
// Stopping a paused container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -207,7 +207,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a paused container by id", func() {
// Stopping a paused container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -220,7 +220,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a running container by name", func() {
// Stopping a running container by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -234,7 +234,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a running container by ID", func() {
// Stopping a running container by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -256,7 +256,7 @@ var _ = Describe("Podman containers ", func() {
Expect(code).To(BeNumerically("==", http.StatusNotFound))
errChan := make(chan error)
- _, err = bt.RunTopContainer(&name, nil, nil)
+ _, err = bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
go func() {
exitCode, err = containers.Wait(bt.conn, name, nil)
@@ -278,7 +278,7 @@ var _ = Describe("Podman containers ", func() {
running = define.ContainerStateRunning
)
errChan := make(chan error)
- _, err := bt.RunTopContainer(&name, nil, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
go func() {
exitCode, err = containers.Wait(bt.conn, name, new(containers.WaitOptions).WithCondition([]define.ContainerStatus{pause}))
@@ -317,7 +317,7 @@ var _ = Describe("Podman containers ", func() {
// a container that has no healthcheck should be a 409
var name = "top"
- bt.RunTopContainer(&name, bindings.PFalse, nil)
+ bt.RunTopContainer(&name, nil)
_, err = containers.RunHealthCheck(bt.conn, name, nil)
Expect(err).ToNot(BeNil())
code, _ = bindings.CheckResponseCode(err)
@@ -376,7 +376,7 @@ var _ = Describe("Podman containers ", func() {
It("podman top", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// By name
@@ -414,7 +414,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by name", func() {
// Container existence check by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -424,7 +424,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by ID", func() {
// Container existence check by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -434,7 +434,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by short ID", func() {
// Container existence check by short ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, cid[0:12], nil)
Expect(err).To(BeNil())
@@ -452,7 +452,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by name with SIGINT", func() {
// Killing a running container should work
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, name, new(containers.KillOptions).WithSignal("SIGINT"))
Expect(err).To(BeNil())
@@ -463,7 +463,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by ID with SIGTERM", func() {
// Killing a running container by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("SIGTERM"))
Expect(err).To(BeNil())
@@ -474,7 +474,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by ID with SIGKILL", func() {
// Killing a running container by ID with SIGKILL should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("SIGKILL"))
Expect(err).To(BeNil())
@@ -483,7 +483,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by bogus signal", func() {
//Killing a running container by bogus signal should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("foobar"))
Expect(err).ToNot(BeNil())
@@ -495,9 +495,9 @@ var _ = Describe("Podman containers ", func() {
// Killing latest container should work
var name1 = "first"
var name2 = "second"
- _, err := bt.RunTopContainer(&name1, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name1, nil)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, nil)
Expect(err).To(BeNil())
containerLatestList, err := containers.List(bt.conn, new(containers.ListOptions).WithLast(1))
Expect(err).To(BeNil())
@@ -526,7 +526,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune stopped containers", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -541,7 +541,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune stopped containers with filters", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -575,7 +575,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune running containers", func() {
// Start the container.
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Check if the container is running.
@@ -598,7 +598,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect running container", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Inspecting running container should succeed
_, err = containers.Inspect(bt.conn, name, nil)
@@ -607,7 +607,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect stopped container", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -618,7 +618,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect running container with size", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
_, err = containers.Inspect(bt.conn, name, new(containers.InspectOptions).WithSize(true))
Expect(err).To(BeNil())
@@ -626,7 +626,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect stopped container with size", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -643,7 +643,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, name, nil)
@@ -654,7 +654,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, cid, nil)
@@ -665,7 +665,7 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, name, new(containers.RemoveOptions).WithForce(true))
@@ -676,7 +676,7 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, cid, new(containers.RemoveOptions).WithForce(true))
@@ -687,7 +687,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container and volume by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, name, new(containers.RemoveOptions).WithVolumes(true))
@@ -698,7 +698,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container and volume by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, cid, new(containers.RemoveOptions).WithVolumes(true))
@@ -709,7 +709,7 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container and volume by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, name, new(containers.RemoveOptions).WithVolumes(true).WithForce(true))
@@ -720,7 +720,7 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container and volume by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, cid, new(containers.RemoveOptions).WithForce(true).WithVolumes(true))
@@ -732,9 +732,9 @@ var _ = Describe("Podman containers ", func() {
It("List containers with filters", func() {
var name = "top"
var name2 = "top2"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, nil)
Expect(err).To(BeNil())
s := specgen.NewSpecGenerator(alpine.name, false)
s.Terminal = true
@@ -753,7 +753,7 @@ var _ = Describe("Podman containers ", func() {
podName := "testpod"
ctrName := "testctr"
bt.Podcreate(&podName)
- _, err := bt.RunTopContainer(&ctrName, bindings.PTrue, &podName)
+ _, err := bt.RunTopContainer(&ctrName, &podName)
Expect(err).To(BeNil())
lastNum := 1
diff --git a/pkg/bindings/test/exec_test.go b/pkg/bindings/test/exec_test.go
index 7a21be77f..c10452eaf 100644
--- a/pkg/bindings/test/exec_test.go
+++ b/pkg/bindings/test/exec_test.go
@@ -4,7 +4,6 @@ import (
"time"
"github.com/containers/podman/v3/pkg/api/handlers"
- "github.com/containers/podman/v3/pkg/bindings"
"github.com/containers/podman/v3/pkg/bindings/containers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -33,7 +32,7 @@ var _ = Describe("Podman containers exec", func() {
It("Podman exec create makes an exec session", func() {
name := "testCtr"
- cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
execConfig := new(handlers.ExecCreateConfig)
@@ -53,7 +52,7 @@ var _ = Describe("Podman containers exec", func() {
It("Podman exec create with bad command fails", func() {
name := "testCtr"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
execConfig := new(handlers.ExecCreateConfig)
diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go
index db51d1e68..688bf049f 100644
--- a/pkg/bindings/test/images_test.go
+++ b/pkg/bindings/test/images_test.go
@@ -101,7 +101,7 @@ var _ = Describe("Podman images", func() {
// Start a container with alpine image
var top string = "top"
- _, err = bt.RunTopContainer(&top, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&top, nil)
Expect(err).To(BeNil())
// we should now have a container called "top" running
containerResponse, err := containers.Inspect(bt.conn, "top", nil)
diff --git a/pkg/bindings/test/info_test.go b/pkg/bindings/test/info_test.go
index 3ca4b99b3..f61e8c370 100644
--- a/pkg/bindings/test/info_test.go
+++ b/pkg/bindings/test/info_test.go
@@ -49,17 +49,17 @@ var _ = Describe("Podman info", func() {
_, err := containers.CreateWithSpec(bt.conn, s, nil)
Expect(err).To(BeNil())
- idPause, err := bt.RunTopContainer(nil, nil, nil)
+ idPause, err := bt.RunTopContainer(nil, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, idPause, nil)
Expect(err).To(BeNil())
- idStop, err := bt.RunTopContainer(nil, nil, nil)
+ idStop, err := bt.RunTopContainer(nil, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, idStop, nil)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(nil, nil, nil)
+ _, err = bt.RunTopContainer(nil, nil)
Expect(err).To(BeNil())
info, err := system.Info(bt.conn, nil)
diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go
index 2b4eb05d3..b06ff31a2 100644
--- a/pkg/bindings/test/pods_test.go
+++ b/pkg/bindings/test/pods_test.go
@@ -63,7 +63,7 @@ var _ = Describe("Podman pods", func() {
Expect(err).To(BeNil())
// Adding an alpine container to the existing pod
- _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, &newpod)
Expect(err).To(BeNil())
podSummary, err = pods.List(bt.conn, nil)
// Verify no errors.
@@ -93,7 +93,7 @@ var _ = Describe("Podman pods", func() {
_, err = pods.Start(bt.conn, newpod, nil)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, &newpod)
Expect(err).To(BeNil())
// Expected err with invalid filter params
@@ -179,7 +179,7 @@ var _ = Describe("Podman pods", func() {
Expect(code).To(BeNumerically("==", http.StatusNotFound))
// Adding an alpine container to the existing pod
- _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, &newpod)
Expect(err).To(BeNil())
// Binding needs to be modified to inspect the pod state.
diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go
index a68a8099c..68e9d9301 100644
--- a/pkg/bindings/test/system_test.go
+++ b/pkg/bindings/test/system_test.go
@@ -4,7 +4,6 @@ import (
"sync"
"time"
- "github.com/containers/podman/v3/pkg/bindings"
"github.com/containers/podman/v3/pkg/bindings/containers"
"github.com/containers/podman/v3/pkg/bindings/pods"
"github.com/containers/podman/v3/pkg/bindings/system"
@@ -41,7 +40,7 @@ var _ = Describe("Podman system", func() {
It("podman events", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
filters := make(map[string][]string)
@@ -72,7 +71,7 @@ var _ = Describe("Podman system", func() {
Expect(err).To(BeNil())
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -98,14 +97,14 @@ var _ = Describe("Podman system", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Start container and leave in running
var name2 = "top2"
- _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, nil)
Expect(err).To(BeNil())
// Adding an unused volume
@@ -132,14 +131,14 @@ var _ = Describe("Podman system", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Start second container and leave in running
var name2 = "top2"
- _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, nil)
Expect(err).To(BeNil())
// Adding an unused volume should work
@@ -167,14 +166,14 @@ var _ = Describe("Podman system", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Start second container and leave in running
var name2 = "top2"
- _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, nil)
Expect(err).To(BeNil())
// Adding an unused volume should work
diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index a608762b5..77a993128 100644
--- a/pkg/checkpoint/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -4,15 +4,14 @@ import (
"context"
"io/ioutil"
"os"
- "path/filepath"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/image"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/util"
"github.com/containers/storage/pkg/archive"
- jsoniter "github.com/json-iterator/go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -20,21 +19,6 @@ import (
// Prefixing the checkpoint/restore related functions with 'cr'
-// crImportFromJSON imports the JSON files stored in the exported
-// checkpoint tarball
-func crImportFromJSON(filePath string, v interface{}) error {
- content, err := ioutil.ReadFile(filePath)
- if err != nil {
- return errors.Wrap(err, "failed to read container definition for restore")
- }
- json := jsoniter.ConfigCompatibleWithStandardLibrary
- if err = json.Unmarshal(content, v); err != nil {
- return errors.Wrapf(err, "failed to unmarshal container definition %s for restore", filePath)
- }
-
- return nil
-}
-
// CRImportCheckpoint is the function which imports the information
// from checkpoint tarball and re-creates the container from that information
func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOptions entities.RestoreOptions) ([]*libpod.Container, error) {
@@ -48,13 +32,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
options := &archive.TarOptions{
// Here we only need the files config.dump and spec.dump
ExcludePatterns: []string{
- "checkpoint",
- "artifacts",
- "ctr.log",
- "rootfs-diff.tar",
- "network.status",
- "deleted.files",
"volumes",
+ "ctr.log",
+ "artifacts",
+ metadata.RootFsDiffTar,
+ metadata.DeletedFilesFile,
+ metadata.NetworkStatusFile,
+ metadata.CheckpointDirectory,
},
}
dir, err := ioutil.TempDir("", "checkpoint")
@@ -73,13 +57,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// Load spec.dump from temporary directory
dumpSpec := new(spec.Spec)
- if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil {
+ if _, err := metadata.ReadJSONFile(dumpSpec, dir, metadata.SpecDumpFile); err != nil {
return nil, err
}
// Load config.dump from temporary directory
config := new(libpod.ContainerConfig)
- if err = crImportFromJSON(filepath.Join(dir, "config.dump"), config); err != nil {
+ if _, err = metadata.ReadJSONFile(config, dir, metadata.ConfigDumpFile); err != nil {
return nil, err
}
@@ -121,7 +105,7 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
return nil, err
}
- _, err = runtime.ImageRuntime().New(ctx, config.RootfsImageName, rtc.Engine.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, nil, util.PullImageMissing)
+ _, err = runtime.ImageRuntime().New(ctx, config.RootfsImageName, rtc.Engine.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
new file mode 100644
index 000000000..53ff55865
--- /dev/null
+++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
@@ -0,0 +1,191 @@
+package crutils
+
+import (
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+// This file mainly exists to make the checkpoint/restore functions
+// available for other users. One possible candidate would be CRI-O.
+
+// CRImportCheckpointWithoutConfig imports the checkpoint archive (input)
+// into the directory destination without "config.dump" and "spec.dump"
+func CRImportCheckpointWithoutConfig(destination, input string) error {
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ }
+
+ defer archiveFile.Close()
+ options := &archive.TarOptions{
+ ExcludePatterns: []string{
+ // Import everything else besides the container config
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
+ },
+ }
+ if err = archive.Untar(archiveFile, destination, options); err != nil {
+ return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ }
+
+ return nil
+}
+
+// CRRemoveDeletedFiles loads the list of deleted files and, if
+// it exists, deletes all files listed.
+func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) error {
+ deletedFiles, _, err := metadata.ReadContainerCheckpointDeletedFiles(baseDirectory)
+ if os.IsNotExist(errors.Unwrap(errors.Unwrap(err))) {
+ // No files to delete. Just return
+ return nil
+ }
+
+ if err != nil {
+ return errors.Wrapf(err, "failed to read deleted files file")
+ }
+
+ for _, deleteFile := range deletedFiles {
+ // Using RemoveAll as deletedFiles, which is generated from 'podman diff'
+ // lists completely deleted directories as a single entry: 'D /root'.
+ if err := os.RemoveAll(filepath.Join(containerRootDirectory, deleteFile)); err != nil {
+ return errors.Wrapf(err, "failed to delete files from container %s during restore", id)
+ }
+ }
+
+ return nil
+}
+
+// CRApplyRootFsDiffTar applies the tar archive found in baseDirectory with the
+// root file system changes on top of containerRootDirectory
+func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error {
+ rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar)
+ if _, err := os.Stat(rootfsDiffPath); err != nil {
+ // Only do this if a rootfs-diff.tar actually exists
+ return nil
+ }
+
+ rootfsDiffFile, err := os.Open(rootfsDiffPath)
+ if err != nil {
+ return errors.Wrap(err, "failed to open root file-system diff file")
+ }
+ defer rootfsDiffFile.Close()
+
+ if err := archive.Untar(rootfsDiffFile, containerRootDirectory, nil); err != nil {
+ return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
+ }
+
+ return nil
+}
+
+// CRCreateRootFsDiffTar goes through the 'changes' and can create two files:
+// * metadata.RootFsDiffTar will contain all new and changed files
+// * metadata.DeletedFilesFile will contain a list of deleted files
+// With these two files it is possible to restore the container file system to the same
+// state it was during checkpointing.
+// Changes to directories (owner, mode) are not handled.
+func CRCreateRootFsDiffTar(changes *[]archive.Change, mountPoint, destination string) (includeFiles []string, err error) {
+ if len(*changes) == 0 {
+ return includeFiles, nil
+ }
+
+ var rootfsIncludeFiles []string
+ var deletedFiles []string
+
+ rootfsDiffPath := filepath.Join(destination, metadata.RootFsDiffTar)
+
+ for _, file := range *changes {
+ if file.Kind == archive.ChangeAdd {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ if file.Kind == archive.ChangeDelete {
+ deletedFiles = append(deletedFiles, file.Path)
+ continue
+ }
+ fileName, err := os.Stat(file.Path)
+ if err != nil {
+ continue
+ }
+ if !fileName.IsDir() && file.Kind == archive.ChangeModify {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ }
+
+ if len(rootfsIncludeFiles) > 0 {
+ rootfsTar, err := archive.TarWithOptions(mountPoint, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ IncludeFiles: rootfsIncludeFiles,
+ })
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ }
+ rootfsDiffFile, err := os.Create(rootfsDiffPath)
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
+ }
+ defer rootfsDiffFile.Close()
+ if _, err = io.Copy(rootfsDiffFile, rootfsTar); err != nil {
+ return includeFiles, err
+ }
+
+ includeFiles = append(includeFiles, metadata.RootFsDiffTar)
+ }
+
+ if len(deletedFiles) == 0 {
+ return includeFiles, nil
+ }
+
+ if _, err := metadata.WriteJSONFile(deletedFiles, destination, metadata.DeletedFilesFile); err != nil {
+ return includeFiles, err
+ }
+
+ includeFiles = append(includeFiles, metadata.DeletedFilesFile)
+
+ return includeFiles, nil
+}
+
+// CRCreateFileWithLabel creates an empty file and sets the corresponding ('fileLabel')
+// SELinux label on the file.
+// This is necessary for CRIU log files because CRIU injects a 'parasite' into
+// the container's processes, and the parasite code will also try to write to the
+// log files from the context of the container processes.
+func CRCreateFileWithLabel(directory, fileName, fileLabel string) error {
+ logFileName := filepath.Join(directory, fileName)
+
+ logFile, err := os.OpenFile(logFileName, os.O_CREATE, 0o600)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create file %q", logFileName)
+ }
+ defer logFile.Close()
+ if err = label.SetFileLabel(logFileName, fileLabel); err != nil {
+ return errors.Wrapf(err, "failed to label file %q", logFileName)
+ }
+
+ return nil
+}
+
+// CRRuntimeSupportsCheckpointRestore tests if the given runtime at 'runtimePath'
+// supports checkpointing. The checkpoint restore interface has no definition
+// but crun implements all commands just as runc does. What runc does is the
+// official definition of the checkpoint/restore interface.
+func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool {
+ // Check if the runtime implements checkpointing. Currently only
+ // runc's and crun's checkpoint/restore implementation is supported.
+ cmd := exec.Command(runtimePath, "checkpoint", "--help")
+ if err := cmd.Start(); err != nil {
+ return false
+ }
+ if err := cmd.Wait(); err == nil {
+ return true
+ }
+ return false
+}
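Taken together, the helpers in this new file cover the restore-side plumbing. A hedged usage sketch that chains them in the order a restore would need them (all paths and the container ID are hypothetical):

	package main

	import (
		"log"

		"github.com/containers/podman/v3/pkg/checkpoint/crutils"
	)

	func main() {
		// Hypothetical locations; in Podman these come from the container's
		// bundle and from the checkpoint archive being restored.
		const (
			runtimePath   = "/usr/bin/runc"
			archivePath   = "/tmp/checkpoint.tar.gz"
			checkpointDir = "/tmp/restore"
			containerRoot = "/tmp/restore/rootfs"
		)

		if !crutils.CRRuntimeSupportsCheckpointRestore(runtimePath) {
			log.Fatal("runtime does not support checkpoint/restore")
		}

		// Unpack everything except config.dump/spec.dump, replay the recorded
		// file-system changes, then drop files that were deleted while the
		// container was running.
		if err := crutils.CRImportCheckpointWithoutConfig(checkpointDir, archivePath); err != nil {
			log.Fatal(err)
		}
		if err := crutils.CRApplyRootFsDiffTar(checkpointDir, containerRoot); err != nil {
			log.Fatal(err)
		}
		if err := crutils.CRRemoveDeletedFiles("ctr-id", checkpointDir, containerRoot); err != nil {
			log.Fatal(err)
		}
	}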
diff --git a/pkg/copy/fileinfo.go b/pkg/copy/fileinfo.go
index b95bcd90c..fb711311c 100644
--- a/pkg/copy/fileinfo.go
+++ b/pkg/copy/fileinfo.go
@@ -7,8 +7,8 @@ import (
"os"
"path/filepath"
"strings"
- "time"
+ "github.com/containers/podman/v3/libpod/define"
"github.com/pkg/errors"
)
@@ -22,14 +22,7 @@ var ErrENOENT = errors.New("No such file or directory")
// FileInfo describes a file or directory and is returned by
// (*CopyItem).Stat().
-type FileInfo struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- Mode os.FileMode `json:"mode"`
- ModTime time.Time `json:"mtime"`
- IsDir bool `json:"isDir"`
- LinkTarget string `json:"linkTarget"`
-}
+type FileInfo = define.FileInfo
// EncodeFileInfo serializes the specified FileInfo as a base64 encoded JSON
// payload. Intended for Docker compat.
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index ac965834a..7d074f89d 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/pkg/copy"
"github.com/containers/podman/v3/pkg/specgen"
"github.com/cri-o/ocicni/pkg/ocicni"
)
@@ -145,7 +144,7 @@ type ContainerInspectReport struct {
}
type ContainerStatReport struct {
- copy.FileInfo
+ define.FileInfo
}
type CommitOptions struct {
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
index f23d964e5..af996ad1e 100644
--- a/pkg/domain/entities/engine.go
+++ b/pkg/domain/entities/engine.go
@@ -1,11 +1,7 @@
package entities
import (
- "context"
- "io"
-
"github.com/containers/common/pkg/config"
- "github.com/opentracing/opentracing-go"
"github.com/spf13/pflag"
)
@@ -37,22 +33,19 @@ type PodmanConfig struct {
*config.Config
*pflag.FlagSet
- CGroupUsage string // rootless code determines Usage message
- ConmonPath string // --conmon flag will set Engine.ConmonPath
- CPUProfile string // Hidden: Should CPU profile be taken
- EngineMode EngineMode // ABI or Tunneling mode
- Identity string // ssh identity for connecting to server
- MaxWorks int // maximum number of parallel threads
- RegistriesConf string // allows for specifying a custom registries.conf
- Remote bool // Connection to Podman API Service will use RESTful API
- RuntimePath string // --runtime flag will set Engine.RuntimePath
- RuntimeFlags []string // global flags for the container runtime
- Span opentracing.Span // tracing object
- SpanCloser io.Closer // Close() for tracing object
- SpanCtx context.Context // context to use when tracing
- Syslog bool // write to StdOut and Syslog, not supported when tunneling
- Trace bool // Hidden: Trace execution
- URI string // URI to RESTful API Service
+ CGroupUsage string // rootless code determines Usage message
+ ConmonPath string // --conmon flag will set Engine.ConmonPath
+ CPUProfile string // Hidden: Should CPU profile be taken
+ EngineMode EngineMode // ABI or Tunneling mode
+ Identity string // ssh identity for connecting to server
+ MaxWorks int // maximum number of parallel threads
+ RegistriesConf string // allows for specifying a custom registries.conf
+ Remote bool // Connection to Podman API Service will use RESTful API
+ RuntimePath string // --runtime flag will set Engine.RuntimePath
+ RuntimeFlags []string // global flags for the container runtime
+ Syslog bool // write to StdOut and Syslog, not supported when tunneling
+ Trace bool // Hidden: Trace execution
+ URI string // URI to RESTful API Service
Runroot string
StorageDriver string
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 19109f873..7999d8209 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -203,6 +203,8 @@ type ImagePushOptions struct {
SignBy string
// SkipTLSVerify to skip HTTPS and certificate verification.
SkipTLSVerify types.OptionalBool
+ // Progress to get progress notifications
+ Progress chan types.ProgressProperties
}
// ImageSearchOptions are the arguments for searching images.
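The new Progress field lets callers observe push progress through a containers/image ProgressProperties channel. A minimal consumer sketch; the send and close below exist only to keep the demo self-contained, and in real use the push code owns the channel's lifetime (an assumption, since that code is not shown here):

	package main

	import (
		"fmt"
		"sync"

		"github.com/containers/image/v5/types"
	)

	func main() {
		// A caller would place this channel into entities.ImagePushOptions.Progress
		// and drain it concurrently while the push runs.
		progress := make(chan types.ProgressProperties)

		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for p := range progress {
				fmt.Printf("progress event: %+v\n", p)
			}
		}()

		progress <- types.ProgressProperties{} // demo only: feed one zero-value event
		close(progress)                        // demo only: end the consumer loop
		wg.Wait()
	}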
diff --git a/pkg/domain/entities/secrets.go b/pkg/domain/entities/secrets.go
index 3481cbe05..8ede981da 100644
--- a/pkg/domain/entities/secrets.go
+++ b/pkg/domain/entities/secrets.go
@@ -42,6 +42,15 @@ type SecretInfoReport struct {
Spec SecretSpec
}
+type SecretInfoReportCompat struct {
+ SecretInfoReport
+ Version SecretVersion
+}
+
+type SecretVersion struct {
+ Index int
+}
+
type SecretSpec struct {
Name string
Driver SecretDriverSpec
@@ -78,6 +87,13 @@ type SwagSecretListResponse struct {
Body []*SecretInfoReport
}
+// Secret list response
+// swagger:response SecretListCompatResponse
+type SwagSecretListCompatResponse struct {
+ // in:body
+ Body []*SecretInfoReportCompat
+}
+
// Secret inspect response
// swagger:response SecretInspectResponse
type SwagSecretInspectResponse struct {
@@ -85,6 +101,13 @@ type SwagSecretInspectResponse struct {
Body SecretInfoReport
}
+// Secret inspect compat
+// swagger:response SecretInspectCompatResponse
+type SwagSecretInspectCompatResponse struct {
+ // in:body
+ Body SecretInfoReportCompat
+}
+
// No such secret
// swagger:response NoSuchSecret
type SwagErrNoSuchSecret struct {
diff --git a/pkg/domain/infra/abi/archive.go b/pkg/domain/infra/abi/archive.go
index 528771ee7..2ea63aa5e 100644
--- a/pkg/domain/infra/abi/archive.go
+++ b/pkg/domain/infra/abi/archive.go
@@ -3,72 +3,16 @@ package abi
import (
"context"
"io"
- "path/filepath"
- "strings"
- buildahCopiah "github.com/containers/buildah/copier"
- "github.com/containers/buildah/pkg/chrootuser"
- "github.com/containers/buildah/util"
- "github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/idtools"
- "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
-// NOTE: Only the parent directory of the container path must exist. The path
-// itself may be created while copying.
func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrID string, containerPath string, reader io.Reader) (entities.ContainerCopyFunc, error) {
container, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
return nil, err
}
-
- containerMountPoint, err := container.Mount()
- if err != nil {
- return nil, err
- }
-
- unmount := func() {
- if err := container.Unmount(false); err != nil {
- logrus.Errorf("Error unmounting container: %v", err)
- }
- }
-
- _, resolvedRoot, resolvedContainerPath, err := ic.containerStat(container, containerMountPoint, containerPath)
- if err != nil {
- unmount()
- return nil, err
- }
-
- decompressed, err := archive.DecompressStream(reader)
- if err != nil {
- unmount()
- return nil, err
- }
-
- idMappings, idPair, err := getIDMappingsAndPair(container, resolvedRoot)
- if err != nil {
- unmount()
- return nil, err
- }
-
- logrus.Debugf("Container copy *to* %q (resolved: %q) on container %q (ID: %s)", containerPath, resolvedContainerPath, container.Name(), container.ID())
-
- return func() error {
- defer unmount()
- defer decompressed.Close()
- putOptions := buildahCopiah.PutOptions{
- UIDMap: idMappings.UIDMap,
- GIDMap: idMappings.GIDMap,
- ChownDirs: idPair,
- ChownFiles: idPair,
- }
- return buildahCopiah.Put(resolvedRoot, resolvedContainerPath, putOptions, decompressed)
- }, nil
+ return container.CopyFromArchive(ctx, containerPath, reader)
}
func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, containerPath string, writer io.Writer) (entities.ContainerCopyFunc, error) {
@@ -76,108 +20,5 @@ func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID
if err != nil {
return nil, err
}
-
- containerMountPoint, err := container.Mount()
- if err != nil {
- return nil, err
- }
-
- unmount := func() {
- if err := container.Unmount(false); err != nil {
- logrus.Errorf("Error unmounting container: %v", err)
- }
- }
-
- // Make sure that "/" copies the *contents* of the mount point and not
- // the directory.
- if containerPath == "/" {
- containerPath = "/."
- }
-
- statInfo, resolvedRoot, resolvedContainerPath, err := ic.containerStat(container, containerMountPoint, containerPath)
- if err != nil {
- unmount()
- return nil, err
- }
-
- idMappings, idPair, err := getIDMappingsAndPair(container, resolvedRoot)
- if err != nil {
- unmount()
- return nil, err
- }
-
- logrus.Debugf("Container copy *from* %q (resolved: %q) on container %q (ID: %s)", containerPath, resolvedContainerPath, container.Name(), container.ID())
-
- return func() error {
- defer container.Unmount(false)
- getOptions := buildahCopiah.GetOptions{
- // Unless the specified points to ".", we want to copy the base directory.
- KeepDirectoryNames: statInfo.IsDir && filepath.Base(containerPath) != ".",
- UIDMap: idMappings.UIDMap,
- GIDMap: idMappings.GIDMap,
- ChownDirs: idPair,
- ChownFiles: idPair,
- }
- return buildahCopiah.Get(resolvedRoot, "", getOptions, []string{resolvedContainerPath}, writer)
- }, nil
-}
-
-// getIDMappingsAndPair returns the ID mappings for the container and the host
-// ID pair.
-func getIDMappingsAndPair(container *libpod.Container, containerMount string) (*storage.IDMappingOptions, *idtools.IDPair, error) {
- user, err := getContainerUser(container, containerMount)
- if err != nil {
- return nil, nil, err
- }
-
- idMappingOpts, err := container.IDMappings()
- if err != nil {
- return nil, nil, err
- }
-
- hostUID, hostGID, err := util.GetHostIDs(idtoolsToRuntimeSpec(idMappingOpts.UIDMap), idtoolsToRuntimeSpec(idMappingOpts.GIDMap), user.UID, user.GID)
- if err != nil {
- return nil, nil, err
- }
-
- idPair := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
- return &idMappingOpts, &idPair, nil
-}
-
-// getContainerUser returns the specs.User of the container.
-func getContainerUser(container *libpod.Container, mountPoint string) (specs.User, error) {
- userspec := container.Config().User
-
- uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec)
- u := specs.User{
- UID: uid,
- GID: gid,
- Username: userspec,
- }
-
- if !strings.Contains(userspec, ":") {
- groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
- if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
- err = err2
- }
- } else {
- u.AdditionalGids = groups
- }
- }
-
- return u, err
-}
-
-// idtoolsToRuntimeSpec converts idtools ID mapping to the one of the runtime spec.
-func idtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxIDMapping) {
- for _, idmap := range idMaps {
- tempIDMap := specs.LinuxIDMapping{
- ContainerID: uint32(idmap.ContainerID),
- HostID: uint32(idmap.HostID),
- Size: uint32(idmap.Size),
- }
- convertedIDMap = append(convertedIDMap, tempIDMap)
- }
- return convertedIDMap
+ return container.CopyToArchive(ctx, containerPath, writer)
}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 4790bd58c..637531ee9 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -301,14 +301,14 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
for _, ctr := range names {
logrus.Debugf("Evicting container %q", ctr)
report := entities.RmReport{Id: ctr}
- id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
+ _, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
if err != nil {
if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
reports = append(reports, &report)
continue
}
- report.Err = errors.Wrapf(err, "failed to evict container: %q", id)
+ report.Err = err
reports = append(reports, &report)
continue
}
diff --git a/pkg/domain/infra/abi/containers_stat.go b/pkg/domain/infra/abi/containers_stat.go
index 1baeb9178..98a23c70b 100644
--- a/pkg/domain/infra/abi/containers_stat.go
+++ b/pkg/domain/infra/abi/containers_stat.go
@@ -2,139 +2,20 @@ package abi
import (
"context"
- "os"
- "path/filepath"
- "strings"
- buildahCopiah "github.com/containers/buildah/copier"
- "github.com/containers/podman/v3/libpod"
- "github.com/containers/podman/v3/pkg/copy"
"github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
-func (ic *ContainerEngine) containerStat(container *libpod.Container, containerMountPoint string, containerPath string) (*entities.ContainerStatReport, string, string, error) {
- // Make sure that "/" copies the *contents* of the mount point and not
- // the directory.
- if containerPath == "/" {
- containerPath += "/."
- }
-
- // Now resolve the container's path. It may hit a volume, it may hit a
- // bind mount, it may be relative.
- resolvedRoot, resolvedContainerPath, err := container.ResolvePath(context.Background(), containerMountPoint, containerPath)
- if err != nil {
- return nil, "", "", err
- }
-
- statInfo, statInfoErr := secureStat(resolvedRoot, resolvedContainerPath)
- if statInfoErr != nil {
- // Not all errors from secureStat map to ErrNotExist, so we
- // have to look into the error string. Turning it into an
- // ENOENT let's the API handlers return the correct status code
- // which is crucial for the remote client.
- if os.IsNotExist(err) || strings.Contains(statInfoErr.Error(), "o such file or directory") {
- statInfoErr = copy.ErrENOENT
- }
- // If statInfo is nil, there's nothing we can do anymore. A
- // non-nil statInfo may indicate a symlink where we must have
- // a closer look.
- if statInfo == nil {
- return nil, "", "", statInfoErr
- }
- }
-
- // Now make sure that the info's LinkTarget is relative to the
- // container's mount.
- var absContainerPath string
-
- if statInfo.IsSymlink {
- // Evaluated symlinks are always relative to the container's mount point.
- absContainerPath = statInfo.ImmediateTarget
- } else if strings.HasPrefix(resolvedContainerPath, containerMountPoint) {
- // If the path is on the container's mount point, strip it off.
- absContainerPath = strings.TrimPrefix(resolvedContainerPath, containerMountPoint)
- absContainerPath = filepath.Join("/", absContainerPath)
- } else {
- // No symlink and not on the container's mount point, so let's
- // move it back to the original input. It must have evaluated
- // to a volume or bind mount but we cannot return host paths.
- absContainerPath = containerPath
- }
-
- // Now we need to make sure to preserve the base path as specified by
- // the user. The `filepath` packages likes to remove trailing slashes
- // and dots that are crucial to the copy logic.
- absContainerPath = copy.PreserveBasePath(containerPath, absContainerPath)
- resolvedContainerPath = copy.PreserveBasePath(containerPath, resolvedContainerPath)
-
- info := copy.FileInfo{
- IsDir: statInfo.IsDir,
- Name: filepath.Base(absContainerPath),
- Size: statInfo.Size,
- Mode: statInfo.Mode,
- ModTime: statInfo.ModTime,
- LinkTarget: absContainerPath,
- }
-
- return &entities.ContainerStatReport{FileInfo: info}, resolvedRoot, resolvedContainerPath, statInfoErr
-}
-
func (ic *ContainerEngine) ContainerStat(ctx context.Context, nameOrID string, containerPath string) (*entities.ContainerStatReport, error) {
container, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
return nil, err
}
- containerMountPoint, err := container.Mount()
- if err != nil {
- return nil, err
- }
-
- defer func() {
- if err := container.Unmount(false); err != nil {
- logrus.Errorf("Error unmounting container: %v", err)
- }
- }()
-
- statReport, _, _, err := ic.containerStat(container, containerMountPoint, containerPath)
- return statReport, err
-}
-
-// secureStat extracts file info for path in a chroot'ed environment in root.
-func secureStat(root string, path string) (*buildahCopiah.StatForItem, error) {
- var glob string
- var err error
-
- // If root and path are equal, then dir must be empty and the glob must
- // be ".".
- if filepath.Clean(root) == filepath.Clean(path) {
- glob = "."
- } else {
- glob, err = filepath.Rel(root, path)
- if err != nil {
- return nil, err
- }
- }
-
- globStats, err := buildahCopiah.Stat(root, "", buildahCopiah.StatOptions{}, []string{glob})
- if err != nil {
- return nil, err
- }
-
- if len(globStats) != 1 {
- return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
- }
-
- stat, exists := globStats[0].Results[glob] // only one glob passed, so that's okay
- if !exists {
- return nil, copy.ErrENOENT
- }
+ info, err := container.Stat(ctx, containerPath)
- var statErr error
- if stat.Error != "" {
- statErr = errors.New(stat.Error)
+ if info != nil {
+ return &entities.ContainerStatReport{FileInfo: *info}, err
}
- return stat, statErr
+ return nil, err
}
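The stat logic deleted above now lives in libpod and the engine delegates to the new container.Stat call shown in this hunk, so the method shrinks to a lookup plus a single delegation. A minimal sketch of a caller of the slimmed-down method; only the ContainerStat signature and the FileInfo fields come from the hunks above, while the package name, helper name and printing are illustrative assumptions:

    package example

    import (
        "context"
        "fmt"

        "github.com/containers/podman/v3/pkg/domain/infra/abi"
    )

    // statPath stats a path inside a container via the refactored engine
    // method: one call, one report, no explicit mount/unmount handling.
    func statPath(ctx context.Context, ic *abi.ContainerEngine, ctr, path string) error {
        report, err := ic.ContainerStat(ctx, ctr, path)
        if err != nil {
            return err
        }
        fmt.Printf("%s: dir=%t size=%d\n", report.FileInfo.Name, report.FileInfo.IsDir, report.FileInfo.Size)
        return nil
    }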
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 46d967789..c02eb2bfc 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -247,7 +247,7 @@ func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options
}
if !options.AllTags {
- newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, options.PullPolicy)
+ newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, options.PullPolicy, nil)
if err != nil {
return nil, err
}
@@ -280,7 +280,7 @@ func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options
foundIDs := []string{}
for _, tag := range tags {
name := rawImage + ":" + tag
- newImage, err := runtime.New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways)
+ newImage, err := runtime.New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways, nil)
if err != nil {
logrus.Errorf("error pulling image %q", name)
continue
@@ -376,7 +376,8 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
options.Compress,
signOptions,
&dockerRegistryOptions,
- nil)
+ nil,
+ options.Progress)
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
// Image might be a manifest list so attempt a manifest push
if _, manifestErr := ir.ManifestPush(ctx, source, destination, options); manifestErr == nil {
@@ -583,8 +584,9 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
report.Deleted = append(report.Deleted, results.Deleted)
report.Untagged = append(report.Untagged, results.Untagged...)
return nil
- case storage.ErrImageUnknown:
- // The image must have been removed already (see #6510).
+ case storage.ErrImageUnknown, storage.ErrLayerUnknown:
+ // The image must have been removed already (see #6510)
+ // or the storage is corrupted (see #9617).
report.Deleted = append(report.Deleted, img.ID())
report.Untagged = append(report.Untagged, img.ID())
return nil
@@ -638,6 +640,10 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
for _, id := range images {
img, err := ir.Libpod.ImageRuntime().NewFromLocal(id)
if err != nil {
+ // attempt to remove image from storage
+ if forceErr := ir.Libpod.RemoveImageFromStorage(id); forceErr == nil {
+ continue
+ }
rmErrors = append(rmErrors, err)
continue
}
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 50a74032c..edde8ece6 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -96,7 +96,15 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
}
// We need to iterate containers looking to see if they belong to the given network
for _, c := range containers {
- if util.StringInSlice(name, c.Config().Networks) {
+ networks, _, err := c.Networks()
+ // if container vanished or network does not exist, go to next container
+ if errors.Is(err, define.ErrNoSuchNetwork) || errors.Is(err, define.ErrNoSuchCtr) {
+ continue
+ }
+ if err != nil {
+ return reports, err
+ }
+ if util.StringInSlice(name, networks) {
// if user passes force, we nuke containers and pods
if !options.Force {
// Without the force option, we return an error
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index c5e20a607..b7ca69281 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -221,7 +221,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
// This ensures the image is in the image store
- newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy)
+ newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go
index bf5dee2ce..34c9138e3 100644
--- a/pkg/registries/registries.go
+++ b/pkg/registries/registries.go
@@ -24,7 +24,10 @@ var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/re
// FIXME: This should be centralized in a global SystemContext initializer inherited throughout the code,
// not haphazardly called throughout the way it is being called now.
func SystemRegistriesConfPath() string {
- if envOverride := os.Getenv("REGISTRIES_CONFIG_PATH"); len(envOverride) > 0 {
+ if envOverride, ok := os.LookupEnv("CONTAINERS_REGISTRIES_CONF"); ok {
+ return envOverride
+ }
+ if envOverride, ok := os.LookupEnv("REGISTRIES_CONFIG_PATH"); ok {
return envOverride
}
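The switch to os.LookupEnv also changes behaviour slightly: a variable that is set but empty now counts as an override, where the old len() check ignored it, and CONTAINERS_REGISTRIES_CONF (the name understood by the containers/image stack) wins over the legacy REGISTRIES_CONFIG_PATH. A small sketch of pointing podman at a scratch registries.conf through the new variable; the helper name and file contents are assumptions, only the variable name comes from this hunk:

    package example

    import (
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // useScratchRegistriesConf writes a throwaway registries.conf and exports
    // it via CONTAINERS_REGISTRIES_CONF, mirroring what the Python and e2e
    // test changes later in this patch do.
    func useScratchRegistriesConf(dir string) (string, error) {
        conf := filepath.Join(dir, "registries.conf")
        if err := ioutil.WriteFile(conf, []byte("unqualified-search-registries = []\n"), 0644); err != nil {
            return "", err
        }
        return conf, os.Setenv("CONTAINERS_REGISTRIES_CONF", conf)
    }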
diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go
index 2792d0cb7..5c945cff3 100644
--- a/pkg/specgen/generate/config_linux.go
+++ b/pkg/specgen/generate/config_linux.go
@@ -8,6 +8,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -37,7 +38,7 @@ func addPrivilegedDevices(g *generate.Generator) error {
for _, d := range hostDevices {
devMnt := spec.Mount{
Destination: d.Path,
- Type: TypeBind,
+ Type: define.TypeBind,
Source: d.Path,
Options: []string{"slave", "nosuid", "noexec", "rw", "rbind"},
}
@@ -259,7 +260,7 @@ func addDevice(g *generate.Generator, device string) error {
}
devMnt := spec.Mount{
Destination: dst,
- Type: TypeBind,
+ Type: define.TypeBind,
Source: src,
Options: []string{"slave", "nosuid", "noexec", perm, "rbind"},
}
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index 23a9ce831..4eae09a5e 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -2,12 +2,14 @@ package generate
import (
"context"
+ "path"
"strings"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/image"
+ "github.com/containers/podman/v3/pkg/cgroups"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -157,8 +159,32 @@ func canMountSys(isRootless, isNewUserns bool, s *specgen.SpecGenerator) bool {
return true
}
+func getCGroupPermissons(unmask []string) string {
+ ro := "ro"
+ rw := "rw"
+ cgroup := "/sys/fs/cgroup"
+
+ cgroupv2, _ := cgroups.IsCgroup2UnifiedMode()
+ if !cgroupv2 {
+ return ro
+ }
+
+ if unmask != nil && unmask[0] == "ALL" {
+ return rw
+ }
+
+ for _, p := range unmask {
+ if path.Clean(p) == cgroup {
+ return rw
+ }
+ }
+ return ro
+}
+
+// SpecGenToOCI returns the base configuration for the container.
func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, newImage *image.Image, mounts []spec.Mount, pod *libpod.Pod, finalCmd []string) (*spec.Spec, error) {
- cgroupPerm := "ro"
+ cgroupPerm := getCGroupPermissons(s.Unmask)
+
g, err := generate.New("linux")
if err != nil {
return nil, err
@@ -251,7 +277,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
- Type: TypeBind,
+ Type: define.TypeBind,
Source: "/proc",
Options: []string{"rbind", "nosuid", "noexec", "nodev"},
}
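The new helper decides whether /sys/fs/cgroup is mounted read-only or read-write: always ro on cgroups v1, and rw on v2 when the unmask list starts with ALL or contains a path that cleans to /sys/fs/cgroup. A hypothetical table test for the helper (not part of this patch), assuming a cgroups-v2 host since on v1 the helper returns "ro" unconditionally:

    package generate

    import "testing"

    // TestGetCGroupPermissons exercises the helper added above on a
    // cgroups-v2 host; on v1 the "rw" cases would fail.
    func TestGetCGroupPermissons(t *testing.T) {
        cases := []struct {
            unmask []string
            want   string
        }{
            {nil, "ro"},
            {[]string{"ALL"}, "rw"},
            {[]string{"/sys/fs/cgroup/"}, "rw"}, // path.Clean drops the trailing slash
            {[]string{"/proc"}, "ro"},
        }
        for _, c := range cases {
            if got := getCGroupPermissons(c.unmask); got != c.want {
                t.Errorf("unmask=%v: got %q, want %q", c.unmask, got, c.want)
            }
        }
    }

The end-to-end effect is exercised by the run_test.go hunk near the end of this patch: podman run --security-opt unmask=/sys/fs/cgroup ... cat /proc/mounts now reports "/sys/fs/cgroup cgroup2 rw".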
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 0bb1421f6..e135f4728 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/libpod"
+ "github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/image"
"github.com/containers/podman/v3/pkg/specgen"
"github.com/containers/podman/v3/pkg/util"
@@ -18,16 +19,6 @@ import (
"github.com/sirupsen/logrus"
)
-// TODO unify this in one place - maybe libpod/define
-const (
- // TypeBind is the type for mounting host dir
- TypeBind = "bind"
- // TypeVolume is the type for named volumes
- TypeVolume = "volume"
- // TypeTmpfs is the type for mounting tmpfs
- TypeTmpfs = "tmpfs"
-)
-
var (
errDuplicateDest = errors.Errorf("duplicate mount destination")
)
@@ -156,7 +147,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
// Final step: maps to arrays
finalMounts := make([]spec.Mount, 0, len(baseMounts))
for _, mount := range baseMounts {
- if mount.Type == TypeBind {
+ if mount.Type == define.TypeBind {
absSrc, err := filepath.Abs(mount.Source)
if err != nil {
return nil, nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
@@ -208,8 +199,8 @@ func getImageVolumes(ctx context.Context, img *image.Image, s *specgen.SpecGener
case "tmpfs":
mount := spec.Mount{
Destination: cleanDest,
- Source: TypeTmpfs,
- Type: TypeTmpfs,
+ Source: define.TypeTmpfs,
+ Type: define.TypeTmpfs,
Options: []string{"rprivate", "rw", "nodev", "exec"},
}
mounts[cleanDest] = mount
@@ -277,7 +268,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
return nil, nil, errors.Errorf("error retrieving container %s spec for volumes-from", ctr.ID())
}
for _, mnt := range spec.Mounts {
- if mnt.Type != TypeBind {
+ if mnt.Type != define.TypeBind {
continue
}
if _, exists := userVolumes[mnt.Destination]; exists {
@@ -338,9 +329,9 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount, error) {
mount := spec.Mount{
Destination: "/dev/init",
- Type: TypeBind,
+ Type: define.TypeBind,
Source: path,
- Options: []string{TypeBind, "ro"},
+ Options: []string{define.TypeBind, "ro"},
}
if path == "" {
@@ -393,13 +384,13 @@ func SupersedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
func InitFSMounts(mounts []spec.Mount) error {
for i, m := range mounts {
switch {
- case m.Type == TypeBind:
+ case m.Type == define.TypeBind:
opts, err := util.ProcessOptions(m.Options, false, m.Source)
if err != nil {
return err
}
mounts[i].Options = opts
- case m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev":
+ case m.Type == define.TypeTmpfs && filepath.Clean(m.Destination) != "/dev":
opts, err := util.ProcessOptions(m.Options, true, "")
if err != nil {
return err
diff --git a/pkg/terminal/util.go b/pkg/terminal/util.go
index 231b47974..04e12f6b3 100644
--- a/pkg/terminal/util.go
+++ b/pkg/terminal/util.go
@@ -10,11 +10,11 @@ import (
"path/filepath"
"sync"
+ "github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
"golang.org/x/crypto/ssh/terminal"
- "k8s.io/client-go/util/homedir"
)
var (
@@ -105,7 +105,7 @@ func ReadLogin() []byte {
func HostKey(host string) ssh.PublicKey {
// parse OpenSSH known_hosts file
// ssh or use ssh-keyscan to get initial key
- knownHosts := filepath.Join(homedir.HomeDir(), ".ssh", "known_hosts")
+ knownHosts := filepath.Join(homedir.Get(), ".ssh", "known_hosts")
fd, err := os.Open(knownHosts)
if err != nil {
logrus.Error(err)
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
deleted file mode 100644
index 5be24faaa..000000000
--- a/pkg/tracing/tracing.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package tracing
-
-import (
- "fmt"
- "io"
-
- "github.com/opentracing/opentracing-go"
- "github.com/uber/jaeger-client-go"
- "github.com/uber/jaeger-client-go/config"
-)
-
-// Init returns an instance of Jaeger Tracer that samples 100% of traces and logs all spans to stdout.
-func Init(service string) (opentracing.Tracer, io.Closer) {
- cfg := &config.Configuration{
- ServiceName: service,
- Sampler: &config.SamplerConfig{
- Type: "const",
- Param: 1,
- },
- Reporter: &config.ReporterConfig{
- LogSpans: true,
- },
- }
- tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
- if err != nil {
- panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
- }
- return tracer, closer
-}
diff --git a/test/apiv2/01-basic.at b/test/apiv2/01-basic.at
index 1ddf49c6f..1357e0ca6 100644
--- a/test/apiv2/01-basic.at
+++ b/test/apiv2/01-basic.at
@@ -18,7 +18,7 @@ t HEAD libpod/_ping 200
for i in /version version; do
t GET $i 200 \
.Components[0].Name="Podman Engine" \
- .Components[0].Details.APIVersion=3.0.0 \
+ .Components[0].Details.APIVersion=3.1.0-dev \
.Components[0].Details.MinAPIVersion=3.0.0 \
.Components[0].Details.Os=linux \
.ApiVersion=1.40 \
diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at
index a650cf958..f866422e2 100644
--- a/test/apiv2/10-images.at
+++ b/test/apiv2/10-images.at
@@ -41,7 +41,7 @@ t GET images/$iid/json 200 \
.Id=sha256:$iid \
.RepoTags[0]=$IMAGE
-t POST "images/create?fromImage=alpine" '' 200 .error=null .status~".*Download complete.*"
+t POST "images/create?fromImage=alpine" '' 200 .error~null .status~".*Download complete.*"
t POST "images/create?fromImage=alpine&tag=latest" '' 200
@@ -49,7 +49,7 @@ t POST "images/create?fromImage=alpine&tag=latest" '' 200
old_iid=$(podman image inspect --format "{{.ID}}" docker.io/library/alpine:latest)
podman rmi -f docker.io/library/alpine:latest
podman tag $IMAGE docker.io/library/alpine:latest
-t POST "images/create?fromImage=alpine" '' 200 .error=null .status~".*$old_iid.*"
+t POST "images/create?fromImage=alpine" '' 200 .error~null .status~".*$old_iid.*"
podman untag $IMAGE docker.io/library/alpine:latest
t POST "images/create?fromImage=quay.io/libpod/alpine&tag=sha256:fa93b01658e3a5a1686dc3ae55f170d8de487006fb53a28efcd12ab0710a2e5f" '' 200
diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at
index 4f3ddf925..ce3049106 100644
--- a/test/apiv2/12-imagesMore.at
+++ b/test/apiv2/12-imagesMore.at
@@ -46,6 +46,10 @@ t POST "images/localhost:5000/myrepo/push?tlsVerify=false&tag=mytag" '' 200
# Untag the image
t POST "libpod/images/$iid/untag?repo=localhost:5000/myrepo&tag=mytag" '' 201
+# Try to push non-existing image
+t POST "images/localhost:5000/idonotexist/push?tlsVerify=false" '' 200
+jq -re 'select(.errorDetail)' <<<"$output" &>/dev/null || echo -e "${red}not ok: error message not found in output${nc}" 1>&2
+
t GET libpod/images/$IMAGE/json 200 \
.RepoTags[-1]=$IMAGE
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index a99e9a184..383d92ef3 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -31,6 +31,13 @@ t GET libpod/containers/json?all=true 200 \
.[0].ExitCode=0 \
.[0].IsInfra=false
+# Test compat API for Network Settings
+t GET /containers/json?all=true 200 \
+ length=1 \
+ .[0].Id~[0-9a-f]\\{64\\} \
+ .[0].Image=$IMAGE \
+ .[0].NetworkSettings.Networks.podman.NetworkID=podman
+
# Make sure `limit` works.
t GET libpod/containers/json?limit=1 200 \
length=1 \
@@ -155,6 +162,7 @@ t DELETE images/localhost/newrepo:v1?force=true 200
t DELETE images/localhost/newrepo:v2?force=true 200
t DELETE libpod/containers/$cid 204
t DELETE libpod/containers/myctr 204
+t DELETE libpod/containers/bogus 404
# test apiv2 create container with correct entrypoint and cmd
@@ -206,9 +214,9 @@ t GET 'containers/json?limit=0&all=1' 200 \
t GET containers/json?limit=2 200 length=2
# Filter with two ids should return both container
-t GET "containers/json?filters=%7B%22id%22%3A%5B%22${cid}%22%2C%22${cid_top}%22%5D%7D&all=1" 200 length=2
+t GET containers/json?filters='{"id":["'${cid}'","'${cid_top}'"]}&all=1' 200 length=2
# Filter with two ids and status running should return only 1 container
-t GET "containers/json?filters=%7B%22id%22%3A%5B%22${cid}%22%2C%22${cid_top}%22%5D%2C%22status%22%3A%5B%22running%22%5D%7D&all=1" 200 \
+t GET containers/json?filters='{"id":["'${cid}'","'${cid_top}'"],"status":["running"]}&all=1' 200 \
length=1 \
.[0].Id=${cid_top}
@@ -246,3 +254,22 @@ t GET containers/$cid/json 200 \
.Mounts[0].Destination="/test"
t DELETE containers/$cid?v=true 204
+
+# test port mapping
+podman run -d --rm --name bar -p 8080:9090 $IMAGE top
+
+t GET containers/json 200 \
+ .[0].Ports[0].PrivatePort=9090 \
+ .[0].Ports[0].PublicPort=8080 \
+ .[0].Ports[0].Type="tcp"
+
+podman stop bar
+
+# Test CPU limit (NanoCPUs)
+t POST containers/create '"Image":"'$IMAGE'","HostConfig":{"NanoCpus":500000}' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 \
+ .HostConfig.NanoCpus=500000
+
+t DELETE containers/$cid?v=true 204
diff --git a/test/apiv2/30-volumes.at b/test/apiv2/30-volumes.at
index b38810039..cf4b3d3ea 100644
--- a/test/apiv2/30-volumes.at
+++ b/test/apiv2/30-volumes.at
@@ -45,18 +45,17 @@ t GET libpod/volumes/json 200 \
.[0].Name~.* \
.[0].Mountpoint~.* \
.[0].CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.*
-# -G --data-urlencode 'filters={"name":["foo1"]}'
-t GET libpod/volumes/json?filters=%7B%22name%22%3A%5B%22foo1%22%5D%7D 200 length=1 .[0].Name=foo1
-# -G --data-urlencode 'filters={"name":["foo1","foo2"]}'
-t GET libpod/volumes/json?filters=%7B%22name%22%3A%20%5B%22foo1%22%2C%20%22foo2%22%5D%7D 200 length=2 .[0].Name=foo1 .[1].Name=foo2
-# -G --data-urlencode 'filters={"name":["nonexistent"]}'
-t GET libpod/volumes/json?filters=%7B%22name%22%3A%5B%22nonexistent%22%5D%7D 200 length=0
-# -G --data-urlencode 'filters={"label":["testlabel"]}'
-t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D 200 length=2
-# -G --data-urlencode 'filters={"label":["testlabel=testonly"]}'
-t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel=testonly%22%5D%7D 200 length=1
-# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}'
-t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D 200 length=1
+t GET libpod/volumes/json?filters='{"name":["foo1"]}' 200 \
+ length=1 \
+ .[0].Name=foo1
+t GET libpod/volumes/json?filters='{"name":%20["foo1",%20"foo2"]}' 200 \
+ length=2 \
+ .[0].Name=foo1 \
+ .[1].Name=foo2
+t GET libpod/volumes/json?filters='{"name":["nonexistent"]}' 200 length=0
+t GET libpod/volumes/json?filters='{"label":["testlabel"]}' 200 length=2
+t GET libpod/volumes/json?filters='{"label":["testlabel=testonly"]}' 200 length=1
+t GET libpod/volumes/json?filters='{"label":["testlabel1=testonly"]}' 200 length=1
## inspect volume
t GET libpod/volumes/foo1/json 200 \
@@ -79,16 +78,12 @@ t DELETE libpod/volumes/foo1 404 \
.response=404
## Prune volumes with label matching 'testlabel1=testonly'
-# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}'
-t POST libpod/volumes/prune?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D "" 200
-# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}'
-t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D 200 length=0
+t POST libpod/volumes/prune?filters='{"label":["testlabel1=testonly"]}' "" 200
+t GET libpod/volumes/json?filters='{"label":["testlabel1=testonly"]}' 200 length=0
## Prune volumes with label matching 'testlabel'
-# -G --data-urlencode 'filters={"label":["testlabel"]}'
-t POST libpod/volumes/prune?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D "" 200
-# -G --data-urlencode 'filters={"label":["testlabel"]}'
-t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D 200 length=0
+t POST libpod/volumes/prune?filters='{"label":["testlabel"]}' "" 200
+t GET libpod/volumes/json?filters='{"label":["testlabel"]}' 200 length=0
## Prune volumes
t POST libpod/volumes/prune "" 200
diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at
index 7ce109913..d3bbaf32b 100644
--- a/test/apiv2/35-networks.at
+++ b/test/apiv2/35-networks.at
@@ -7,54 +7,52 @@ t GET networks/non-existing-network 404 \
.cause='network not found'
t POST libpod/networks/create?name=network1 '' 200 \
-.Filename~.*/network1\\.conflist
+ .Filename~.*/network1\\.conflist
# --data '{"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]},"Labels":{"abc":"val"}}'
t POST libpod/networks/create?name=network2 '"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]},"Labels":{"abc":"val"}' 200 \
-.Filename~.*/network2\\.conflist
+ .Filename~.*/network2\\.conflist
# test for empty mask
t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[]}' 500 \
-.cause~'.*cannot be empty'
+ .cause~'.*cannot be empty'
# test for invalid mask
t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \
-.cause~'.*mask is invalid'
+ .cause~'.*mask is invalid'
# network list
t GET libpod/networks/json 200
-# filters={"name":["network1"]}
-t GET libpod/networks/json?filters=%7B%22name%22%3A%5B%22network1%22%5D%7D 200 \
-length=1 \
-.[0].Name=network1
+t GET libpod/networks/json?filters='{"name":["network1"]}' 200 \
+ length=1 \
+ .[0].Name=network1
t GET networks 200
#network list docker endpoint
-#filters={"name":["network1","network2"]}
-t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \
-length=2
-#filters={"name":["network"]}
-t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \
-length=2
-# filters={"label":["abc"]}
-t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 200 \
-length=1
-# id filter filters={"id":["a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1"]}
-t GET networks?filters=%7B%22id%22%3A%5B%22a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1%22%5D%7D 200 \
-length=1 \
-.[0].Name=network1 \
-.[0].Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1
-# invalid filter filters={"dangling":["1"]}
-t GET networks?filters=%7B%22dangling%22%3A%5B%221%22%5D%7D 500 \
-.cause='invalid filter "dangling"'
+t GET networks?filters='{"name":["network1","network2"]}' 200 \
+ length=2
+t GET networks?filters='{"name":["network"]}' 200 \
+ length=2
+t GET networks?filters='{"label":["abc"]}' 200 \
+ length=1
+# old docker filter type see #9526
+t GET networks?filters='{"label":{"abc":true}}' 200 \
+ length=1
+t GET networks?filters='{"id":["a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1"]}' 200 \
+ length=1 \
+ .[0].Name=network1 \
+ .[0].Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1
+# invalid filter
+t GET networks?filters='{"dangling":["1"]}' 500 \
+ .cause='invalid filter "dangling"'
# (#9293 with no networks the endpoint should return empty array instead of null)
-t GET networks?filters=%7B%22name%22%3A%5B%22doesnotexists%22%5D%7D 200 \
-"[]"
+t GET networks?filters='{"name":["doesnotexists"]}' 200 \
+ "[]"
# network inspect docker
t GET networks/a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 200 \
-.Name=network1 \
-.Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 \
-.Scope=local
+ .Name=network1 \
+ .Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 \
+ .Scope=local
# network create docker
t POST networks/create '"Name":"net3","IPAM":{"Config":[]}' 201
@@ -63,11 +61,11 @@ t DELETE networks/net3 204
# clean the network
t DELETE libpod/networks/network1 200 \
-.[0].Name~network1 \
-.[0].Err=null
+ .[0].Name~network1 \
+ .[0].Err=null
t DELETE libpod/networks/network2 200 \
-.[0].Name~network2 \
-.[0].Err=null
+ .[0].Name~network2 \
+ .[0].Err=null
# vim: filetype=sh
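The label filter is now accepted in two encodings: the libpod list form {"label":["abc"]} and the older docker map form {"label":{"abc":true}} noted at #9526. A sketch of the normalization the test above implies; this is an illustration of accepting either shape, not the handler podman actually uses:

    package example

    import "encoding/json"

    // normalizeFilters accepts both filter encodings exercised above: the list
    // form {"label":["abc"]} and the legacy docker map form {"label":{"abc":true}},
    // and returns the list form in both cases.
    func normalizeFilters(raw []byte) (map[string][]string, error) {
        asLists := map[string][]string{}
        if err := json.Unmarshal(raw, &asLists); err == nil {
            return asLists, nil
        }
        asMaps := map[string]map[string]bool{}
        if err := json.Unmarshal(raw, &asMaps); err != nil {
            return nil, err
        }
        out := map[string][]string{}
        for key, vals := range asMaps {
            for val, ok := range vals {
                if ok {
                    out[key] = append(out[key], val)
                }
            }
        }
        return out, nil
    }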
diff --git a/test/apiv2/44-mounts.at b/test/apiv2/44-mounts.at
new file mode 100644
index 000000000..5dc560852
--- /dev/null
+++ b/test/apiv2/44-mounts.at
@@ -0,0 +1,21 @@
+# -*- sh -*-
+
+podman pull $IMAGE &>/dev/null
+
+# Test various HostConfig options
+tmpfs_name="/mytmpfs"
+t POST containers/create?name=hostconfig_test '"Image":"'$IMAGE'","Cmd":["df"],"HostConfig":{"Binds":["/tmp/doesnotexist:/test1"],"TmpFs":{"'$tmpfs_name'":"rw"}}' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+
+# Prior to #9512, the tmpfs would be called '/mytmpfs=rw', with the '=rw'
+t GET containers/${cid}/json 200 \
+ .HostConfig.Tmpfs[\"${tmpfs_name}\"]~rw,
+
+# Run the container, verify output
+t POST containers/${cid}/start '' 204
+t POST containers/${cid}/wait '' 200
+t GET containers/${cid}/logs?stdout=true 200
+
+like "$(<$WORKDIR/curl.result.out)" ".* ${tmpfs_name}" \
+ "'df' output includes tmpfs name"
diff --git a/test/apiv2/45-system.at b/test/apiv2/45-system.at
index 985d86e56..ad4bdf4f7 100644
--- a/test/apiv2/45-system.at
+++ b/test/apiv2/45-system.at
@@ -49,18 +49,16 @@ t GET libpod/system/df 200 '.Volumes | length=3'
# Prune volumes
-# -G --data-urlencode 'volumes=true&filters={"label":["testlabel1=idontmatch"]}'
-t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1=idontmatch%22%5D%7D' params='' 200
+t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1=idontmatch"]}' params='' 200
# nothing should have been pruned
t GET system/df 200 '.Volumes | length=3'
t GET libpod/system/df 200 '.Volumes | length=3'
-# -G --data-urlencode 'volumes=true&filters={"label":["testlabel1=testonly"]}'
# only foo3 should be pruned because of filter
-t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D' params='' 200 .VolumePruneReports[0].Id=foo3
+t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1=testonly"]}' params='' 200 .VolumePruneReports[0].Id=foo3
# only foo2 should be pruned because of filter
-t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1%22%5D%7D' params='' 200 .VolumePruneReports[0].Id=foo2
+t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1"]}' params='' 200 .VolumePruneReports[0].Id=foo2
# foo1, the last remaining volume should be pruned without any filters applied
t POST 'libpod/system/prune?volumes=true' params='' 200 .VolumePruneReports[0].Id=foo1
diff --git a/test/apiv2/50-secrets.at b/test/apiv2/50-secrets.at
index 1ef43381a..c4ffb5883 100644
--- a/test/apiv2/50-secrets.at
+++ b/test/apiv2/50-secrets.at
@@ -14,18 +14,21 @@ t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0","Labels":{"fail":"fai
t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0"' 409
# secret inspect
-t GET secrets/mysecret 200\
- .Spec.Name=mysecret
+t GET secrets/mysecret 200 \
+ .Spec.Name=mysecret \
+ .Version.Index=1
# secret inspect non-existent secret
t GET secrets/bogus 404
# secret list
-t GET secrets 200\
- length=1
+t GET secrets 200 \
+ length=1 \
+ .[0].Spec.Name=mysecret \
+ .[0].Version.Index=1
# secret list unsupported filters
-t GET secrets?filters=%7B%22name%22%3A%5B%22foo1%22%5D%7D 400
+t GET secrets?filters='{"name":["foo1"]}' 400
# secret rm
t DELETE secrets/mysecret 204
diff --git a/test/apiv2/60-auth.at b/test/apiv2/60-auth.at
new file mode 100644
index 000000000..378955cd7
--- /dev/null
+++ b/test/apiv2/60-auth.at
@@ -0,0 +1,29 @@
+# -*- sh -*-
+#
+# registry-related tests
+#
+
+start_registry
+
+# FIXME FIXME FIXME: remove the 'if false' for use with PR 9589
+if false; then
+
+# FIXME FIXME: please forgive the horrible POST params format; I have an
+# upcoming PR which should fix that.
+
+# Test with wrong password. Confirm bad status and appropriate error message
+t POST /v1.40/auth "\"username\":\"${REGISTRY_USERNAME}\",\"password\":\"WrOnGPassWord\",\"serveraddress\":\"localhost:$REGISTRY_PORT/\"" \
+ 400 \
+ .Status~'.* invalid username/password'
+
+# Test with the right password. Confirm status message and reasonable token
+t POST /v1.40/auth "\"username\":\"${REGISTRY_USERNAME}\",\"password\":\"${REGISTRY_PASSWORD}\",\"serveraddress\":\"localhost:$REGISTRY_PORT/\"" \
+ 200 \
+ .Status="Login Succeeded" \
+ .IdentityToken~[a-zA-Z0-9]
+
+# FIXME: now what? Try something-something using that token?
+token=$(jq -r .IdentityToken <<<"$output")
+# ...
+
+fi # FIXME FIXME FIXME: remove when working
diff --git a/test/apiv2/rest_api/__init__.py b/test/apiv2/rest_api/__init__.py
index db0257f03..b7b8a7649 100644
--- a/test/apiv2/rest_api/__init__.py
+++ b/test/apiv2/rest_api/__init__.py
@@ -27,7 +27,7 @@ class Podman(object):
self.cmd.append("--root=" + os.path.join(self.anchor_directory, "crio"))
self.cmd.append("--runroot=" + os.path.join(self.anchor_directory, "crio-run"))
- os.environ["REGISTRIES_CONFIG_PATH"] = os.path.join(self.anchor_directory, "registry.conf")
+ os.environ["CONTAINERS_REGISTRIES_CONF"] = os.path.join(self.anchor_directory, "registry.conf")
p = configparser.ConfigParser()
p.read_dict(
{
@@ -36,7 +36,7 @@ class Podman(object):
"registries.block": {"registries": "[]"},
}
)
- with open(os.environ["REGISTRIES_CONFIG_PATH"], "w") as w:
+ with open(os.environ["CONTAINERS_REGISTRIES_CONF"], "w") as w:
p.write(w)
os.environ["CNI_CONFIG_PATH"] = os.path.join(self.anchor_directory, "cni", "net.d")
diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py
index 05c24f2ea..8a78f5185 100644
--- a/test/apiv2/rest_api/test_rest_v2_0_0.py
+++ b/test/apiv2/rest_api/test_rest_v2_0_0.py
@@ -64,7 +64,9 @@ class TestApi(unittest.TestCase):
super().setUpClass()
TestApi.podman = Podman()
- TestApi.service = TestApi.podman.open("system", "service", "tcp:localhost:8080", "--time=0")
+ TestApi.service = TestApi.podman.open(
+ "system", "service", "tcp:localhost:8080", "--time=0"
+ )
# give the service some time to be ready...
time.sleep(2)
@@ -241,7 +243,9 @@ class TestApi(unittest.TestCase):
def test_post_create_compat(self):
"""Create network and connect container during create"""
- net = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"})
+ net = requests.post(
+ PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"}
+ )
self.assertEqual(net.status_code, 201, net.text)
create = requests.post(
@@ -450,11 +454,15 @@ class TestApi(unittest.TestCase):
self.assertIn(k, o)
def test_network_compat(self):
- name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
+ name = "Network_" + "".join(
+ random.choice(string.ascii_letters) for i in range(10)
+ )
# Cannot test for 0 existing networks because default "podman" network always exists
- create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": name})
+ create = requests.post(
+ PODMAN_URL + "/v1.40/networks/create", json={"Name": name}
+ )
self.assertEqual(create.status_code, 201, create.content)
obj = json.loads(create.content)
self.assertIn(type(obj), (dict,))
@@ -484,8 +492,12 @@ class TestApi(unittest.TestCase):
self.assertEqual(inspect.status_code, 404, inspect.content)
# network prune
- prune_name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
- prune_create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name})
+ prune_name = "Network_" + "".join(
+ random.choice(string.ascii_letters) for i in range(10)
+ )
+ prune_create = requests.post(
+ PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name}
+ )
self.assertEqual(create.status_code, 201, prune_create.content)
prune = requests.post(PODMAN_URL + "/v1.40/networks/prune")
@@ -493,9 +505,10 @@ class TestApi(unittest.TestCase):
obj = json.loads(prune.content)
self.assertTrue(prune_name in obj["NetworksDeleted"])
-
def test_volumes_compat(self):
- name = "Volume_" + "".join(random.choice(string.ascii_letters) for i in range(10))
+ name = "Volume_" + "".join(
+ random.choice(string.ascii_letters) for i in range(10)
+ )
ls = requests.get(PODMAN_URL + "/v1.40/volumes")
self.assertEqual(ls.status_code, 200, ls.content)
@@ -511,7 +524,9 @@ class TestApi(unittest.TestCase):
for k in required_keys:
self.assertIn(k, obj)
- create = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
+ create = requests.post(
+ PODMAN_URL + "/v1.40/volumes/create", json={"Name": name}
+ )
self.assertEqual(create.status_code, 201, create.content)
# See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate
@@ -688,15 +703,21 @@ class TestApi(unittest.TestCase):
"""Verify issue #8865"""
pod_name = list()
- pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
- pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
+ pod_name.append(
+ "Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10))
+ )
+ pod_name.append(
+ "Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10))
+ )
r = requests.post(
_url("/pods/create"),
json={
"name": pod_name[0],
"no_infra": False,
- "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
+ "portmappings": [
+ {"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}
+ ],
},
)
self.assertEqual(r.status_code, 201, r.text)
@@ -715,7 +736,9 @@ class TestApi(unittest.TestCase):
json={
"name": pod_name[1],
"no_infra": False,
- "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
+ "portmappings": [
+ {"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}
+ ],
},
)
self.assertEqual(r.status_code, 201, r.text)
diff --git a/test/apiv2/test-apiv2 b/test/apiv2/test-apiv2
index 5b1e2ef80..e32d6bc62 100755
--- a/test/apiv2/test-apiv2
+++ b/test/apiv2/test-apiv2
@@ -17,6 +17,8 @@ PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODM
IMAGE=$PODMAN_TEST_IMAGE_FQN
+REGISTRY_IMAGE="${PODMAN_TEST_IMAGE_REGISTRY}/${PODMAN_TEST_IMAGE_USER}/registry:2.7"
+
# END stuff you can but probably shouldn't customize
###############################################################################
# BEGIN setup
@@ -188,6 +190,13 @@ function t() {
# entrypoint path can include a descriptive comment; strip it off
path=${path%% *}
+ # path may include JSONish params that curl will barf on; url-encode them
+ path="${path//'['/%5B}"
+ path="${path//']'/%5D}"
+ path="${path//'{'/%7B}"
+ path="${path//'}'/%7D}"
+ path="${path//':'/%3A}"
+
# curl -X HEAD but without --head seems to wait for output anyway
if [[ $method == "HEAD" ]]; then
curl_args="--head"
@@ -306,13 +315,115 @@ function start_service() {
die "Cannot start service on non-localhost ($HOST)"
fi
- $PODMAN_BIN --root $WORKDIR system service --time 15 tcp:127.0.0.1:$PORT \
+ $PODMAN_BIN --root $WORKDIR/server_root system service \
+ --time 15 \
+ tcp:127.0.0.1:$PORT \
&> $WORKDIR/server.log &
service_pid=$!
wait_for_port $HOST $PORT
}
+function stop_service() {
+ # Stop the server
+ if [[ -n $service_pid ]]; then
+ kill $service_pid
+ wait $service_pid
+ fi
+}
+
+####################
+# start_registry # Run a local registry
+####################
+REGISTRY_PORT=
+REGISTRY_USERNAME=
+REGISTRY_PASSWORD=
+function start_registry() {
+ # We can be invoked multiple times, e.g. from different subtests, but
+ # let's assume that once started we only kill it at the end of tests.
+ if [[ -n "$REGISTRY_PORT" ]]; then
+ return
+ fi
+
+ REGISTRY_PORT=$(random_port)
+ REGISTRY_USERNAME=u$(random_string 7)
+ REGISTRY_PASSWORD=p$(random_string 7)
+
+ local REGDIR=$WORKDIR/registry
+ local AUTHDIR=$REGDIR/auth
+ mkdir -p $AUTHDIR
+
+ mkdir -p ${REGDIR}/{root,runroot}
+ local PODMAN_REGISTRY_ARGS="--root ${REGDIR}/root --runroot ${REGDIR}/runroot"
+
+ # Give it three tries, to compensate for network flakes
+ podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE ||
+ podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE ||
+ podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE
+
+ # Create a local cert and credentials
+ # FIXME: is there a hidden "--quiet" flag? This is too noisy.
+ openssl req -newkey rsa:4096 -nodes -sha256 \
+ -keyout $AUTHDIR/domain.key -x509 -days 2 \
+ -out $AUTHDIR/domain.crt \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
+ -addext subjectAltName=DNS:localhost
+ htpasswd -Bbn ${REGISTRY_USERNAME} ${REGISTRY_PASSWORD} \
+ > $AUTHDIR/htpasswd
+
+ # Run the registry, and wait for it to come up
+ podman ${PODMAN_REGISTRY_ARGS} run -d \
+ -p ${REGISTRY_PORT}:5000 \
+ --name registry \
+ -v $AUTHDIR:/auth:Z \
+ -e "REGISTRY_AUTH=htpasswd" \
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+ -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
+ -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt \
+ -e REGISTRY_HTTP_TLS_KEY=/auth/domain.key \
+ ${REGISTRY_IMAGE}
+
+ wait_for_port localhost $REGISTRY_PORT
+}
+
+function stop_registry() {
+ local REGDIR=${WORKDIR}/registry
+ if [[ -d $REGDIR ]]; then
+ local OPTS="--root ${REGDIR}/root --runroot ${REGDIR}/runroot"
+ podman $OPTS stop -f -t 0 -a
+
+ # rm/rmi are important when running rootless: without them we
+ # get EPERMS in tmpdir cleanup because files are owned by subuids.
+ podman $OPTS rm -f -a
+ podman $OPTS rmi -f -a
+ fi
+}
+
+#################
+# random_port # Random open port; arg is range (min-max), default 5000-5999
+#################
+function random_port() {
+ local range=${1:-5000-5999}
+
+ local port
+ for port in $(shuf -i ${range}); do
+ if ! { exec 5<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ echo $port
+ return
+ fi
+ done
+
+ die "Could not find open port in range $range"
+}
+
+###################
+# random_string # Pseudorandom alphanumeric string of given length
+###################
+function random_string() {
+ local length=${1:-10}
+ head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
+}
+
###################
# wait_for_port # Returns once port is available on host
###################
@@ -334,8 +445,8 @@ function wait_for_port() {
# podman # Needed by some test scripts to invoke the actual podman binary
############
function podman() {
- echo "\$ $PODMAN_BIN $*" >>$WORKDIR/output.log
- $PODMAN_BIN --root $WORKDIR "$@" >>$WORKDIR/output.log 2>&1
+ echo "\$ $PODMAN_BIN $*" >>$WORKDIR/output.log
+ $PODMAN_BIN --root $WORKDIR/server_root "$@" >>$WORKDIR/output.log 2>&1
}
####################
@@ -405,9 +516,8 @@ if [ -n "$service_pid" ]; then
podman rm -a
podman rmi -af
- # Stop the server
- kill $service_pid
- wait $service_pid
+ stop_registry
+ stop_service
fi
test_count=$(<$testcounter_file)
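With the url-encoding step added to t(), filter expressions in the tests above can be written as readable JSON; only the characters curl barfs on in a raw path ([ ] { } :) are percent-encoded, so for example filters={"label":["testlabel"]} goes over the wire as filters=%7B"label"%3A%5B"testlabel"%5D%7D. A Go rendering of the same substitutions, purely illustrative (the script itself does this with bash parameter expansion):

    package example

    import "strings"

    // jsonishEscaper mirrors the substitutions added to t() above: only the
    // brackets, braces and colons are percent-encoded, leaving the '?', '&'
    // and '=' that structure the query string alone.
    var jsonishEscaper = strings.NewReplacer(
        "[", "%5B",
        "]", "%5D",
        "{", "%7B",
        "}", "%7D",
        ":", "%3A",
    )

    func encodeJSONishPath(path string) string {
        return jsonishEscaper.Replace(path)
    }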
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index c733db61c..4839d66ec 100644
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -532,4 +532,20 @@ RUN grep CapEff /proc/self/status`
// Then
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman build --timestamp flag", func() {
+ containerfile := `FROM quay.io/libpod/alpine:latest
+RUN echo hello`
+
+ containerfilePath := filepath.Join(podmanTest.TempDir, "Containerfile")
+ err := ioutil.WriteFile(containerfilePath, []byte(containerfile), 0755)
+ Expect(err).To(BeNil())
+ session := podmanTest.Podman([]string{"build", "-t", "test", "--timestamp", "0", "--file", containerfilePath, podmanTest.TempDir})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"image", "inspect", "--format", "{{ .Created }}", "test"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.OutputToString()).To(Equal("1970-01-01 00:00:00 +0000 UTC"))
+ })
})
diff --git a/test/e2e/config/containers.conf b/test/e2e/config/containers.conf
index fdf679664..bbd712254 100644
--- a/test/e2e/config/containers.conf
+++ b/test/e2e/config/containers.conf
@@ -55,6 +55,7 @@ umask = "0002"
annotations=["run.oci.keep_original_groups=1",]
+no_hosts=true
[engine]
network_cmd_options=["allow_host_loopback=true"]
diff --git a/test/e2e/containers_conf_test.go b/test/e2e/containers_conf_test.go
index 9c2260c5f..6b1a0d16e 100644
--- a/test/e2e/containers_conf_test.go
+++ b/test/e2e/containers_conf_test.go
@@ -331,4 +331,26 @@ var _ = Describe("Podman run", func() {
Expect(inspect.OutputToString()).To(ContainSubstring("run.oci.keep_original_groups:1"))
})
+ It("podman run with --add-host and no-hosts=true fails", func() {
+ session := podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+ Expect(session.ErrorToString()).To(ContainSubstring("--no-hosts and --add-host cannot be set together"))
+
+ session = podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", "--no-hosts=false", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ It("podman run with no-hosts=true /etc/hosts does not include hostname", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", "--name", "test", ALPINE, "cat", "/etc/hosts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Not(ContainSubstring("test")))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--name", "test", "--no-hosts=false", ALPINE, "cat", "/etc/hosts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("test"))
+ })
})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index d7c697f28..21e006c20 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -734,4 +734,36 @@ ENTRYPOINT /bin/sleep`
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
})
+
+ It("podman generate kube based on user in container", func() {
+ // Build an image with an entrypoint.
+ containerfile := `FROM quay.io/libpod/alpine:latest
+RUN adduser -u 10001 -S test1
+USER test1`
+
+ targetPath, err := CreateTempDirInTempDir()
+ Expect(err).To(BeNil())
+ containerfilePath := filepath.Join(targetPath, "Containerfile")
+ err = ioutil.WriteFile(containerfilePath, []byte(containerfile), 0644)
+ Expect(err).To(BeNil())
+
+ image := "generatekube:test"
+ session := podmanTest.Podman([]string{"build", "-f", containerfilePath, "-t", image})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"create", "--pod", "new:testpod", image, "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "testpod"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ pod := new(v1.Pod)
+ err = yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+ Expect(*pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(int64(10001)))
+ })
+
})
diff --git a/test/e2e/libpod_suite_remote_test.go b/test/e2e/libpod_suite_remote_test.go
index a26765ee9..3115c246f 100644
--- a/test/e2e/libpod_suite_remote_test.go
+++ b/test/e2e/libpod_suite_remote_test.go
@@ -48,17 +48,17 @@ func (p *PodmanTestIntegration) PodmanExtraFiles(args []string, extraFiles []*os
func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
- os.Setenv("REGISTRIES_CONFIG_PATH", defaultFile)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", defaultFile)
}
func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
- os.Setenv("REGISTRIES_CONFIG_PATH", outfile)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", outfile)
ioutil.WriteFile(outfile, b, 0644)
}
func resetRegistriesConfigEnv() {
- os.Setenv("REGISTRIES_CONFIG_PATH", "")
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", "")
}
func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
pti := PodmanTestCreateUtil(tempDir, true)
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index 0ae30ca10..cc03ccc96 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -31,17 +31,17 @@ func (p *PodmanTestIntegration) PodmanExtraFiles(args []string, extraFiles []*os
func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
- os.Setenv("REGISTRIES_CONFIG_PATH", defaultFile)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", defaultFile)
}
func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
- os.Setenv("REGISTRIES_CONFIG_PATH", outfile)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", outfile)
ioutil.WriteFile(outfile, b, 0644)
}
func resetRegistriesConfigEnv() {
- os.Setenv("REGISTRIES_CONFIG_PATH", "")
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", "")
}
func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 99876de29..6269bb92b 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -125,15 +125,15 @@ var _ = Describe("Podman login and logout", func() {
// Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not
// run in parallel unless they opt in by calling t.Parallel(). So don’t do that.
- oldRCP, hasRCP := os.LookupEnv("REGISTRIES_CONFIG_PATH")
+ oldRCP, hasRCP := os.LookupEnv("CONTAINERS_REGISTRIES_CONF")
defer func() {
if hasRCP {
- os.Setenv("REGISTRIES_CONFIG_PATH", oldRCP)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", oldRCP)
} else {
- os.Unsetenv("REGISTRIES_CONFIG_PATH")
+ os.Unsetenv("CONTAINERS_REGISTRIES_CONF")
}
}()
- os.Setenv("REGISTRIES_CONFIG_PATH", registriesConf.Name())
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", registriesConf.Name())
session := podmanTest.Podman([]string{"login", "-u", "podmantest", "-p", "test"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 8f695279a..3051031a5 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -37,16 +37,18 @@ var _ = Describe("Podman logs", func() {
})
for _, log := range []string{"k8s-file", "journald", "json-file"} {
+
It("all lines: "+log, func() {
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
logc.WaitWithDefaultTimeout()
Expect(logc).To(Exit(0))
-
cid := logc.OutputToString()
+
results := podmanTest.Podman([]string{"logs", cid})
results.WaitWithDefaultTimeout()
Expect(results).To(Exit(0))
Expect(len(results.OutputToStringArray())).To(Equal(3))
+ Expect(results.OutputToString()).To(Equal("podman podman podman"))
})
It("tail two lines: "+log, func() {
@@ -73,6 +75,18 @@ var _ = Describe("Podman logs", func() {
Expect(len(results.OutputToStringArray())).To(Equal(0))
})
+ It("tail 99 lines: "+log, func() {
+ logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+ cid := logc.OutputToString()
+
+ results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
+ results.WaitWithDefaultTimeout()
+ Expect(results).To(Exit(0))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
+ })
+
It("tail 800 lines: "+log, func() {
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"})
logc.WaitWithDefaultTimeout()
@@ -158,78 +172,6 @@ var _ = Describe("Podman logs", func() {
Expect(results).To(Exit(0))
})
- It("for container: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- Expect(results.OutputToString()).To(Equal("podman podman podman"))
- })
-
- It("tail two lines: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
- results := podmanTest.Podman([]string{"logs", "--tail", "2", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(2))
- })
-
- It("tail 99 lines: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
- It("tail 2 lines with timestamps: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--tail", "2", "-t", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(2))
- })
-
- It("since time 2017-08-07: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--since", "2017-08-07T10:10:09.056611202-04:00", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
- It("with duration 10m: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--since", "10m", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
It("streaming output: "+log, func() {
containerName := "logs-f-rm"
@@ -259,17 +201,6 @@ var _ = Describe("Podman logs", func() {
}
})
- It("podman logs with log-driver=none errors: "+log, func() {
- ctrName := "logsctr"
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
-
- logs := podmanTest.Podman([]string{"logs", "-f", ctrName})
- logs.WaitWithDefaultTimeout()
- Expect(logs).To(Not(Exit(0)))
- })
-
It("follow output stopped container: "+log, func() {
containerName := "logs-f"
@@ -373,4 +304,15 @@ var _ = Describe("Podman logs", func() {
Expect(err).To(BeNil())
Expect(string(out)).To(ContainSubstring(containerName))
})
+
+ It("podman logs with log-driver=none errors", func() {
+ ctrName := "logsctr"
+ logc := podmanTest.Podman([]string{"run", "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+
+ logs := podmanTest.Podman([]string{"logs", "-f", ctrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs).To(Not(Exit(0)))
+ })
})
diff --git a/test/e2e/network_connect_disconnect_test.go b/test/e2e/network_connect_disconnect_test.go
index eb8ad7181..e9a7b421f 100644
--- a/test/e2e/network_connect_disconnect_test.go
+++ b/test/e2e/network_connect_disconnect_test.go
@@ -193,6 +193,13 @@ var _ = Describe("Podman network connect and disconnect", func() {
exec = podmanTest.Podman([]string{"exec", "-it", "test", "ip", "addr", "show", "eth1"})
exec.WaitWithDefaultTimeout()
Expect(exec.ExitCode()).To(BeZero())
+
+ // make sure no logrus errors are shown https://github.com/containers/podman/issues/9602
+ rm := podmanTest.Podman([]string{"rm", "-f", "test"})
+ rm.WaitWithDefaultTimeout()
+ Expect(rm.ExitCode()).To(BeZero())
+ Expect(rm.ErrorToString()).To(Equal(""))
+
})
It("podman network connect when not running", func() {
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index 53521cdc4..ff2e1eb66 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -352,6 +352,29 @@ var _ = Describe("Podman network", func() {
Expect(rmAll.ExitCode()).To(BeZero())
})
+ It("podman network remove after disconnect when container initially created with the network", func() {
+ SkipIfRootless("disconnect works only in non rootless container")
+
+ container := "test"
+ network := "foo"
+
+ session := podmanTest.Podman([]string{"network", "create", network})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"run", "--name", container, "--network", network, "-d", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"network", "disconnect", network, container})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"network", "rm", network})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
It("podman network remove bogus", func() {
session := podmanTest.Podman([]string{"network", "rm", "bogus"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go
index f19413221..14696c0f6 100644
--- a/test/e2e/rename_test.go
+++ b/test/e2e/rename_test.go
@@ -89,4 +89,25 @@ var _ = Describe("podman rename", func() {
Expect(ps.ExitCode()).To(Equal(0))
Expect(ps.OutputToString()).To(ContainSubstring(newName))
})
+
+ It("Rename a running container with exec sessions", func() {
+ ctrName := "testCtr"
+ ctr := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, ALPINE, "top"})
+ ctr.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(Equal(0))
+
+ exec := podmanTest.Podman([]string{"exec", "-d", ctrName, "top"})
+ exec.WaitWithDefaultTimeout()
+ Expect(exec.ExitCode()).To(Equal(0))
+
+ newName := "aNewName"
+ rename := podmanTest.Podman([]string{"rename", ctrName, newName})
+ rename.WaitWithDefaultTimeout()
+ Expect(rename.ExitCode()).To(Equal(0))
+
+ ps := podmanTest.Podman([]string{"ps", "-aq", "--filter", fmt.Sprintf("name=%s", newName), "--format", "{{ .Names }}"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ Expect(ps.OutputToString()).To(ContainSubstring(newName))
+ })
})
diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go
index 8c712b1be..6abe152a9 100644
--- a/test/e2e/run_selinux_test.go
+++ b/test/e2e/run_selinux_test.go
@@ -2,6 +2,7 @@ package integration
import (
"os"
+ "path/filepath"
. "github.com/containers/podman/v3/test/utils"
. "github.com/onsi/ginkgo"
@@ -294,4 +295,52 @@ var _ = Describe("Podman run", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("container_t"))
})
+
+ It("podman test --ipc=net", func() {
+ session := podmanTest.Podman([]string{"run", "--net=host", ALPINE, "cat", "/proc/self/attr/current"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("container_t"))
+ })
+
+ It("podman test --ipc=net", func() {
+ session := podmanTest.Podman([]string{"run", "--net=host", ALPINE, "cat", "/proc/self/attr/current"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("container_t"))
+ })
+
+ It("podman test --runtime=/PATHTO/kata-runtime", func() {
+ runtime := podmanTest.OCIRuntime
+ podmanTest.OCIRuntime = filepath.Join(podmanTest.TempDir, "kata-runtime")
+ err := os.Symlink("/bin/true", podmanTest.OCIRuntime)
+ Expect(err).To(BeNil())
+ if IsRemote() {
+ podmanTest.StopRemoteService()
+ podmanTest.StartRemoteService()
+ }
+ session := podmanTest.Podman([]string{"create", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{ .ProcessLabel }}", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.OutputToString()).To(ContainSubstring("container_kvm_t"))
+
+ podmanTest.OCIRuntime = runtime
+ if IsRemote() {
+ podmanTest.StopRemoteService()
+ podmanTest.StartRemoteService()
+ }
+ })
+
+ It("podman test init labels", func() {
+ session := podmanTest.Podman([]string{"create", ubi_init, "/sbin/init"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{ .ProcessLabel }}", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.OutputToString()).To(ContainSubstring("container_init_t"))
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index f0ba9d1d9..490d05699 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -304,6 +304,42 @@ var _ = Describe("Podman run", func() {
})
+ It("podman run security-opt unmask on /sys/fs/cgroup", func() {
+
+ SkipIfCgroupV1("podman umask on /sys/fs/cgroup will fail with cgroups V1")
+ SkipIfRootless("/sys/fs/cgroup rw access is needed")
+ rwOnCGroups := "/sys/fs/cgroup cgroup2 rw"
+ session := podmanTest.Podman([]string{"run", "--security-opt", "unmask=ALL", "--security-opt", "mask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(rwOnCGroups))
+
+ session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(rwOnCGroups))
+
+ session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup///", ALPINE, "cat", "/proc/mounts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(rwOnCGroups))
+
+ session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=ALL", ALPINE, "cat", "/proc/mounts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(rwOnCGroups))
+
+ session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", "--security-opt", "mask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(rwOnCGroups))
+
+ session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", ALPINE, "ls", "/sys/fs/cgroup"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).ToNot(BeEmpty())
+ })
+
It("podman run seccomp test", func() {
session := podmanTest.Podman([]string{"run", "-it", "--security-opt", strings.Join([]string{"seccomp=", forbidGetCWDSeccompProfile()}, ""), ALPINE, "pwd"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go
index dd264eb0d..d6d58c94c 100644
--- a/test/e2e/stop_test.go
+++ b/test/e2e/stop_test.go
@@ -150,7 +150,7 @@ var _ = Describe("Podman stop", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"stop", "--time", "1", "test4"})
+ session = podmanTest.Podman([]string{"stop", "--time", "0", "test4"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
output := session.OutputToString()
@@ -166,7 +166,7 @@ var _ = Describe("Podman stop", func() {
session := podmanTest.Podman([]string{"run", "-d", "--name", "test5", ALPINE, "sleep", "100"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"stop", "--timeout", "1", "test5"})
+ session = podmanTest.Podman([]string{"stop", "--timeout", "0", "test5"})
// Without timeout container stops in 10 seconds
// If not stopped in 5 seconds, then --timeout did not work
session.Wait(5)
diff --git a/test/python/__init__.py b/test/python/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/python/__init__.py
diff --git a/test/python/docker/__init__.py b/test/python/docker/__init__.py
index 351834316..59b7987f4 100644
--- a/test/python/docker/__init__.py
+++ b/test/python/docker/__init__.py
@@ -8,7 +8,7 @@ import tempfile
from docker import DockerClient
-from test.python.docker import constant
+from .compat import constant
class Podman(object):
@@ -39,7 +39,9 @@ class Podman(object):
self.cmd.append("--root=" + os.path.join(self.anchor_directory, "crio"))
self.cmd.append("--runroot=" + os.path.join(self.anchor_directory, "crio-run"))
- os.environ["REGISTRIES_CONFIG_PATH"] = os.path.join(self.anchor_directory, "registry.conf")
+ os.environ["CONTAINERS_REGISTRIES_CONF"] = os.path.join(
+ self.anchor_directory, "registry.conf"
+ )
p = configparser.ConfigParser()
p.read_dict(
{
@@ -48,13 +50,17 @@ class Podman(object):
"registries.block": {"registries": "[]"},
}
)
- with open(os.environ["REGISTRIES_CONFIG_PATH"], "w") as w:
+ with open(os.environ["CONTAINERS_REGISTRIES_CONF"], "w") as w:
p.write(w)
- os.environ["CNI_CONFIG_PATH"] = os.path.join(self.anchor_directory, "cni", "net.d")
+ os.environ["CNI_CONFIG_PATH"] = os.path.join(
+ self.anchor_directory, "cni", "net.d"
+ )
os.makedirs(os.environ["CNI_CONFIG_PATH"], exist_ok=True)
self.cmd.append("--cni-config-dir=" + os.environ["CNI_CONFIG_PATH"])
- cni_cfg = os.path.join(os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist")
+ cni_cfg = os.path.join(
+ os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist"
+ )
# json decoded and encoded to ensure legal json
buf = json.loads(
"""
diff --git a/test/python/docker/build_labels/Dockerfile b/test/python/docker/build_labels/Dockerfile
new file mode 100644
index 000000000..f6e07066c
--- /dev/null
+++ b/test/python/docker/build_labels/Dockerfile
@@ -0,0 +1 @@
+FROM quay.io/libpod/alpine:latest
diff --git a/test/python/docker/README.md b/test/python/docker/compat/README.md
index c10fd636d..50796d66b 100644
--- a/test/python/docker/README.md
+++ b/test/python/docker/compat/README.md
@@ -13,26 +13,26 @@ To run the tests locally in your sandbox (Fedora 32,33):
### Run the entire test suite
+All commands are run from the root of the repository.
+
```shell
-# python3 -m unittest discover test/python/docker
+# python3 -m unittest discover -s test/python/docker
```
Passing the -v option to your test script will instruct unittest.main() to enable a higher level of verbosity, and produce detailed output:
```shell
-# python3 -m unittest -v discover test/python/docker
+# python3 -m unittest -v discover -s test/python/docker
```
### Run a specific test class
```shell
-# cd test/python/docker
-# python3 -m unittest -v tests.test_images
+# python3 -m unittest -v test.python.docker.compat.test_images.TestImages
```
### Run a specific test within the test class
```shell
-# cd test/python/docker
-# python3 -m unittest tests.test_images.TestImages.test_import_image
+# python3 -m unittest test.python.docker.compat.test_images.TestImages.test_tag_valid_image
```
diff --git a/test/python/docker/compat/__init__.py b/test/python/docker/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/python/docker/compat/__init__.py
diff --git a/test/python/docker/common.py b/test/python/docker/compat/common.py
index 11f512495..bdc67c287 100644
--- a/test/python/docker/common.py
+++ b/test/python/docker/compat/common.py
@@ -1,10 +1,12 @@
from docker import DockerClient
-from test.python.docker import constant
+from test.python.docker.compat import constant
def run_top_container(client: DockerClient):
- c = client.containers.create(constant.ALPINE, command="top", detach=True, tty=True, name="top")
+ c = client.containers.create(
+ constant.ALPINE, command="top", detach=True, tty=True, name="top"
+ )
c.start()
return c.id
diff --git a/test/python/docker/constant.py b/test/python/docker/compat/constant.py
index 892293c97..892293c97 100644
--- a/test/python/docker/constant.py
+++ b/test/python/docker/compat/constant.py
diff --git a/test/python/docker/test_containers.py b/test/python/docker/compat/test_containers.py
index 337cacd5c..be70efa67 100644
--- a/test/python/docker/test_containers.py
+++ b/test/python/docker/compat/test_containers.py
@@ -5,7 +5,8 @@ import unittest
from docker import DockerClient, errors
-from test.python.docker import Podman, common, constant
+from test.python.docker import Podman
+from test.python.docker.compat import common, constant
class TestContainers(unittest.TestCase):
@@ -87,9 +88,11 @@ class TestContainers(unittest.TestCase):
self.assertEqual(len(containers), 2)
def test_start_container_with_random_port_bind(self):
- container = self.client.containers.create(image=constant.ALPINE,
- name="containerWithRandomBind",
- ports={'1234/tcp': None})
+ container = self.client.containers.create(
+ image=constant.ALPINE,
+ name="containerWithRandomBind",
+ ports={"1234/tcp": None},
+ )
containers = self.client.containers.list(all=True)
self.assertTrue(container in containers)
diff --git a/test/python/docker/test_images.py b/test/python/docker/compat/test_images.py
index f2b6a5190..4a90069a9 100644
--- a/test/python/docker/test_images.py
+++ b/test/python/docker/compat/test_images.py
@@ -7,7 +7,8 @@ import unittest
from docker import DockerClient, errors
-from test.python.docker import Podman, common, constant
+from test.python.docker import Podman
+from test.python.docker.compat import common, constant
class TestImages(unittest.TestCase):
@@ -78,7 +79,9 @@ class TestImages(unittest.TestCase):
self.assertEqual(len(self.client.images.list()), 2)
# List images with filter
- self.assertEqual(len(self.client.images.list(filters={"reference": "alpine"})), 1)
+ self.assertEqual(
+ len(self.client.images.list(filters={"reference": "alpine"})), 1
+ )
def test_search_image(self):
"""Search for image"""
@@ -91,7 +94,7 @@ class TestImages(unittest.TestCase):
r = self.client.images.search("bogus/bogus")
except:
return
- self.assertTrue(len(r)==0)
+ self.assertTrue(len(r) == 0)
def test_remove_image(self):
"""Remove image"""
@@ -146,6 +149,14 @@ class TestImages(unittest.TestCase):
self.assertEqual(len(self.client.images.list()), 2)
+ def test_build_image(self):
+ labels = {"apple": "red", "grape": "green"}
+ _ = self.client.images.build(path="test/python/docker/build_labels", labels=labels, tag="labels")
+ image = self.client.images.get("labels")
+ self.assertEqual(image.labels["apple"], labels["apple"])
+ self.assertEqual(image.labels["grape"], labels["grape"])
+
+
if __name__ == "__main__":
# Setup temporary space
diff --git a/test/python/docker/test_system.py b/test/python/docker/compat/test_system.py
index 46b90e5f6..131b18991 100644
--- a/test/python/docker/test_system.py
+++ b/test/python/docker/compat/test_system.py
@@ -5,7 +5,8 @@ import unittest
from docker import DockerClient
-from test.python.docker import Podman, common, constant
+from test.python.docker import Podman, constant
+from test.python.docker.compat import common
class TestSystem(unittest.TestCase):
diff --git a/test/python/requirements.txt b/test/python/requirements.txt
new file mode 100644
index 000000000..ee85bf1d1
--- /dev/null
+++ b/test/python/requirements.txt
@@ -0,0 +1,6 @@
+docker~=4.4.3
+
+requests~=2.20.0
+setuptools~=50.3.2
+python-dateutil~=2.8.1
+PyYAML~=5.4.1
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 93449ece9..b2999a9e7 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -139,7 +139,7 @@ echo $rand | 0 | $rand
is "$output" "" "--pull=never [present]: no output"
# Now test with a remote image which we don't have present (the 00 tag)
- NONLOCAL_IMAGE="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:00000000"
+ NONLOCAL_IMAGE="$PODMAN_NONLOCAL_IMAGE_FQN"
run_podman 125 run --pull=never $NONLOCAL_IMAGE true
is "$output" "Error: unable to find a name and tag match for $NONLOCAL_IMAGE in repotags: no such image" "--pull=never [with image not present]: error"
@@ -175,7 +175,7 @@ echo $rand | 0 | $rand
# 'run --rmi' deletes the image in the end unless it's used by another container
@test "podman run --rmi" {
# Name of a nonlocal image. It should be pulled in by the first 'run'
- NONLOCAL_IMAGE="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:00000000"
+ NONLOCAL_IMAGE="$PODMAN_NONLOCAL_IMAGE_FQN"
run_podman 1 image exists $NONLOCAL_IMAGE
# Run a container, without --rm; this should block subsequent --rmi
diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats
index 7d9f1fcb3..0652a97e4 100644
--- a/test/system/050-stop.bats
+++ b/test/system/050-stop.bats
@@ -66,7 +66,7 @@ load helpers
name=thiscontainerdoesnotexist
run_podman 125 stop $name
is "$output" \
- "Error: no container with name or ID $name found: no such container" \
+ "Error: no container with name or ID \"$name\" found: no such container" \
"podman stop nonexistent container"
run_podman stop --ignore $name
diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats
index 312106b36..73e807843 100644
--- a/test/system/065-cp.bats
+++ b/test/system/065-cp.bats
@@ -15,6 +15,7 @@ load helpers
random-1-$(random_string 15)
random-2-$(random_string 20)
)
+
echo "${randomcontent[0]}" > $srcdir/hostfile0
echo "${randomcontent[1]}" > $srcdir/hostfile1
echo "${randomcontent[2]}" > $srcdir/hostfile2
@@ -24,6 +25,10 @@ load helpers
run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
run_podman exec cpcontainer mkdir /srv/subdir
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
# format is: <id> | <destination arg to cp> | <full dest path> | <test name>
# where:
# id is 0-2, one of the random strings/files
@@ -44,8 +49,7 @@ load helpers
0 | subdir | /srv/subdir/hostfile0 | copy to workdir/subdir
"
- # Copy one of the files into container, exec+cat, confirm the file
- # is there and matches what we expect
+ # RUNNING container
while read id dest dest_fullname description; do
run_podman cp $srcdir/hostfile$id cpcontainer:$dest
run_podman exec cpcontainer cat $dest_fullname
@@ -67,6 +71,61 @@ load helpers
is "$output" 'Error: "/IdoNotExist/" could not be found on container cpcontainer: No such file or directory' \
"copy into nonexistent path in container"
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # CREATED container
+ while read id dest dest_fullname description; do
+ run_podman create --name cpcontainer --workdir=/srv $cpimage sleep infinity
+ run_podman cp $srcdir/hostfile$id cpcontainer:$dest
+ run_podman start cpcontainer
+ run_podman exec cpcontainer cat $dest_fullname
+ is "$output" "${randomcontent[$id]}" "$description (cp -> ctr:$dest)"
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+ done < <(parse_table "$tests")
+
+ run_podman rmi -f $cpimage
+}
+
+
+@test "podman cp file from host to container tmpfs mount" {
+ srcdir=$PODMAN_TMPDIR/cp-test-file-host-to-ctr
+ mkdir -p $srcdir
+ content=tmpfile-content$(random_string 20)
+ echo $content > $srcdir/file
+
+ # RUNNING container
+ run_podman run -d --mount type=tmpfs,dst=/tmp --name cpcontainer $IMAGE sleep infinity
+ run_podman cp $srcdir/file cpcontainer:/tmp
+ run_podman exec cpcontainer cat /tmp/file
+ is "$output" "${content}" "cp to running container's tmpfs"
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # CREATED container (with copy up)
+ run_podman create --mount type=tmpfs,dst=/tmp --name cpcontainer $IMAGE sleep infinity
+ run_podman cp $srcdir/file cpcontainer:/tmp
+ run_podman start cpcontainer
+ run_podman exec cpcontainer cat /tmp/file
+ is "$output" "${content}" "cp to created container's tmpfs"
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+}
+
+
+@test "podman cp file from host to container and check ownership" {
+ srcdir=$PODMAN_TMPDIR/cp-test-file-host-to-ctr
+ mkdir -p $srcdir
+ content=cp-user-test-$(random_string 10)
+ echo "content" > $srcdir/hostfile
+ userid=$(id -u)
+
+ run_podman run --user=$userid --userns=keep-id -d --name cpcontainer $IMAGE sleep infinity
+ run_podman cp $srcdir/hostfile cpcontainer:/tmp/hostfile
+ run_podman exec cpcontainer stat -c "%u" /tmp/hostfile
+ is "$output" "$userid" "copied file is chowned to the container user"
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
}
@@ -87,6 +146,10 @@ load helpers
run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/containerfile1"
run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[2]} > /srv/subdir/containerfile2"
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
# format is: <id> | <source arg to cp> | <destination arg (appended to $srcdir) to cp> | <full dest path (appended to $srcdir)> | <test name>
tests="
0 | /tmp/containerfile | | /containerfile | copy to srcdir/
@@ -98,109 +161,214 @@ load helpers
2 | subdir/containerfile2 | / | /containerfile2 | copy from workdir/subdir (rel path) to srcdir
"
- # Copy one of the files to the host, cat, confirm the file
- # is there and matches what we expect
+ # RUNNING container
while read id src dest dest_fullname description; do
# dest may be "''" for empty table cells
if [[ $dest == "''" ]];then
unset dest
fi
run_podman cp cpcontainer:$src "$srcdir$dest"
- run cat $srcdir$dest_fullname
- is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to \$srcdir$dest)"
- rm $srcdir/$dest_fullname
+ is "$(< $srcdir$dest_fullname)" "${randomcontent[$id]}" "$description (cp ctr:$src to \$srcdir$dest)"
+ rm $srcdir$dest_fullname
done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+ # Created container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read id src dest dest_fullname description; do
+ # dest may be "''" for empty table cells
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ run_podman cp cpcontainer:$src "$srcdir$dest"
+ is "$(< $srcdir$dest_fullname)" "${randomcontent[$id]}" "$description (cp ctr:$src to \$srcdir$dest)"
+ rm $srcdir$dest_fullname
+ done < <(parse_table "$tests")
run_podman rm -f cpcontainer
+
+ run_podman rmi -f $cpimage
}
@test "podman cp dir from host to container" {
- dirname=dir-test
- srcdir=$PODMAN_TMPDIR/$dirname
- mkdir -p $srcdir
+ srcdir=$PODMAN_TMPDIR
+ mkdir -p $srcdir/dir/sub
local -a randomcontent=(
random-0-$(random_string 10)
random-1-$(random_string 15)
)
- echo "${randomcontent[0]}" > $srcdir/hostfile0
- echo "${randomcontent[1]}" > $srcdir/hostfile1
+ echo "${randomcontent[0]}" > $srcdir/dir/sub/hostfile0
+ echo "${randomcontent[1]}" > $srcdir/dir/sub/hostfile1
# "." and "dir/." will copy the contents, so make sure that a dir ending
# with dot is treated correctly.
- mkdir -p $srcdir.
- cp $srcdir/* $srcdir./
+ mkdir -p $srcdir/dir.
+ cp -r $srcdir/dir/* $srcdir/dir.
run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
run_podman exec cpcontainer mkdir /srv/subdir
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
# format is: <source arg to cp (appended to srcdir)> | <destination arg to cp> | <full dest path> | <test name>
tests="
- | / | /dir-test | copy to root
- . | / | /dir-test. | copy dotdir to root
- / | /tmp | /tmp/dir-test | copy to tmp
- /. | /usr/ | /usr/ | copy contents of dir to usr/
- | . | /srv/dir-test | copy to workdir (rel path)
- | subdir/. | /srv/subdir/dir-test | copy to workdir subdir (rel path)
+ dir | / | /dir/sub | copy dir to root
+ dir. | / | /dir./sub | copy dir. to root
+ dir/ | /tmp | /tmp/dir/sub | copy dir/ to tmp
+ dir/. | /usr/ | /usr/sub | copy dir/. usr/
+ dir/sub | . | /srv/sub | copy dir/sub to workdir (rel path)
+ dir/sub/. | subdir/. | /srv/subdir | copy dir/sub/. to workdir subdir (rel path)
+ dir | /newdir1 | /newdir1/sub | copy dir to newdir1
+ dir/ | /newdir2 | /newdir2/sub | copy dir/ to newdir2
+ dir/. | /newdir3 | /newdir3/sub | copy dir/. to newdir3
"
+ # RUNNING container
while read src dest dest_fullname description; do
# src may be "''" for empty table cells
if [[ $src == "''" ]];then
unset src
fi
- run_podman cp $srcdir$src cpcontainer:$dest
- run_podman exec cpcontainer ls $dest_fullname
- run_podman exec cpcontainer cat $dest_fullname/hostfile0
- is "$output" "${randomcontent[0]}" "$description (cp -> ctr:$dest)"
- run_podman exec cpcontainer cat $dest_fullname/hostfile1
- is "$output" "${randomcontent[1]}" "$description (cp -> ctr:$dest)"
+ run_podman cp $srcdir/$src cpcontainer:$dest
+ run_podman exec cpcontainer cat $dest_fullname/hostfile0 $dest_fullname/hostfile1
+ is "${lines[0]}" "${randomcontent[0]}" "$description (cp -> ctr:$dest)"
+ is "${lines[1]}" "${randomcontent[1]}" "$description (cp -> ctr:$dest)"
done < <(parse_table "$tests")
-
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
+
+ # CREATED container
+ while read src dest dest_fullname description; do
+ # src may be "''" for empty table cells
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ run_podman create --name cpcontainer --workdir=/srv $cpimage sleep infinity
+ run_podman cp $srcdir/$src cpcontainer:$dest
+ run_podman start cpcontainer
+ run_podman exec cpcontainer cat $dest_fullname/hostfile0 $dest_fullname/hostfile1
+ is "${lines[0]}" "${randomcontent[0]}" "$description (cp -> ctr:$dest)"
+ is "${lines[1]}" "${randomcontent[1]}" "$description (cp -> ctr:$dest)"
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+ done < <(parse_table "$tests")
+
+ run_podman rmi -f $cpimage
}
@test "podman cp dir from container to host" {
- srcdir=$PODMAN_TMPDIR/dir-test
- mkdir -p $srcdir
+ destdir=$PODMAN_TMPDIR/cp-test-dir-ctr-to-host
+ mkdir -p $destdir
+ # Create 2 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ )
run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c 'mkdir /srv/subdir; echo "This first file is on the container" > /srv/subdir/containerfile1'
- run_podman exec cpcontainer sh -c 'echo "This second file is on the container as well" > /srv/subdir/containerfile2'
+ run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[0]} > /srv/subdir/containerfile0"
+ run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/subdir/containerfile1"
# "." and "dir/." will copy the contents, so make sure that a dir ending
# with dot is treated correctly.
run_podman exec cpcontainer sh -c 'mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./'
- run_podman cp cpcontainer:/srv $srcdir
- run cat $srcdir/srv/subdir/containerfile1
- is "$output" "This first file is on the container"
- run cat $srcdir/srv/subdir/containerfile2
- is "$output" "This second file is on the container as well"
- rm -rf $srcdir/srv/subdir
-
- run_podman cp cpcontainer:/srv/. $srcdir
- run ls $srcdir/subdir
- run cat $srcdir/subdir/containerfile1
- is "$output" "This first file is on the container"
- run cat $srcdir/subdir/containerfile2
- is "$output" "This second file is on the container as well"
- rm -rf $srcdir/subdir
-
- run_podman cp cpcontainer:/srv/subdir/. $srcdir
- run cat $srcdir/containerfile1
- is "$output" "This first file is on the container"
- run cat $srcdir/containerfile2
- is "$output" "This second file is on the container as well"
- rm -rf $srcdir/subdir
-
- run_podman cp cpcontainer:/tmp/subdir. $srcdir
- run cat $srcdir/subdir./containerfile1
- is "$output" "This first file is on the container"
- run cat $srcdir/subdir./containerfile2
- is "$output" "This second file is on the container as well"
- rm -rf $srcdir/subdir.
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # format is: <source arg to cp (appended to /srv)> | <dest> | <full dest path> | <test name>
+ tests="
+/srv | | /srv/subdir | copy /srv
+/srv | /newdir | /newdir/subdir | copy /srv to /newdir
+/srv/ | | /srv/subdir | copy /srv/
+/srv/. | | /subdir | copy /srv/.
+/srv/. | /newdir | /newdir/subdir | copy /srv/. to /newdir
+/srv/subdir/. | | | copy /srv/subdir/.
+/tmp/subdir. | | /subdir. | copy /tmp/subdir.
+"
+
+ # RUNNING container
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+ run_podman cp cpcontainer:$src $destdir$dest
+ is "$(< $destdir$dest_fullname/containerfile0)" "${randomcontent[0]}" "$description"
+ is "$(< $destdir$dest_fullname/containerfile1)" "${randomcontent[1]}" "$description"
+ rm -rf $destdir/*
+ done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # CREATED container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+ run_podman cp cpcontainer:$src $destdir$dest
+ is "$(< $destdir$dest_fullname/containerfile0)" "${randomcontent[0]}" "$description"
+ is "$(< $destdir$dest_fullname/containerfile1)" "${randomcontent[1]}" "$description"
+ rm -rf $destdir/*
+ done < <(parse_table "$tests")
+ run_podman rm -f cpcontainer
+
+ run_podman rmi -f $cpimage
+}
+
+
+@test "podman cp symlinked directory from container" {
+ destdir=$PODMAN_TMPDIR/cp-weird-symlink
+ mkdir -p $destdir
+
+ # Create 2 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ )
+ run_podman run -d --name cpcontainer $IMAGE sleep infinity
+ run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/containerfile0"
+ run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /tmp/containerfile1"
+ run_podman exec cpcontainer sh -c "mkdir /tmp/sub && cd /tmp/sub && ln -s .. weirdlink"
+
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # RUNNING container
+ # NOTE: /dest does not exist yet but is expected to be created during copy
+ run_podman cp cpcontainer:/tmp/sub/weirdlink $destdir/dest
+ run cat $destdir/dest/containerfile0 $destdir/dest/containerfile1
+ is "${lines[0]}" "${randomcontent[0]}" "eval symlink - running container"
+ is "${lines[1]}" "${randomcontent[1]}" "eval symlink - running container"
+
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+ run rm -rf $destdir/dest
+
+ # CREATED container
+ run_podman create --name cpcontainer $cpimage
+ run_podman cp cpcontainer:/tmp/sub/weirdlink $destdir/dest
+ run cat $destdir/dest/containerfile0 $destdir/dest/containerfile1
+ is "${lines[0]}" "${randomcontent[0]}" "eval symlink - created container"
+ is "${lines[1]}" "${randomcontent[1]}" "eval symlink - created container"
run_podman rm -f cpcontainer
}
@@ -228,9 +396,7 @@ load helpers
run_podman create --name cpcontainer -v $volume1:/tmp/volume -v $volume2:/tmp/volume/sub-volume $IMAGE
run_podman cp $srcdir/hostfile cpcontainer:/tmp/volume/sub-volume
-
- run cat $volume2_mount/hostfile
- is "$output" "This file should be in volume2"
+ is "$(< $volume2_mount/hostfile)" "This file should be in volume2"
# Volume 1 must be empty.
run ls $volume1_mount
@@ -254,9 +420,7 @@ load helpers
run_podman create --name cpcontainer -v $volume:/tmp/volume -v $mountdir:/tmp/volume/mount $IMAGE
run_podman cp $srcdir/hostfile cpcontainer:/tmp/volume/mount
-
- run cat $mountdir/hostfile
- is "$output" "This file should be in the mount"
+ is "$(< $mountdir/hostfile)" "This file should be in the mount"
run_podman rm -f cpcontainer
run_podman volume rm $volume
@@ -284,7 +448,7 @@ load helpers
# cp no longer supports wildcarding
run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir
- run_podman rm cpcontainer
+ run_podman rm -f cpcontainer
}
@@ -308,7 +472,7 @@ load helpers
# make sure there are no files in dstdir
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
- run_podman rm cpcontainer
+ run_podman rm -f cpcontainer
}
@@ -332,7 +496,7 @@ load helpers
# make sure there are no files in dstdir
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
- run_podman rm cpcontainer
+ run_podman rm -f cpcontainer
}
@@ -352,7 +516,7 @@ load helpers
# dstdir must be empty
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
- run_podman rm cpcontainer
+ run_podman rm -f cpcontainer
}
@@ -409,6 +573,7 @@ load helpers
run_podman exec cpcontainer cat /tmp/d3/x
is "$output" "$rand_content3" "cp creates file named x"
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
}
@@ -446,6 +611,7 @@ load helpers
run_podman exec cpcontainer cat $graphroot/$rand_filename
is "$output" "$rand_content" "Contents of file copied into container"
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
}
@@ -494,6 +660,7 @@ load helpers
run_podman 125 cp - cpcontainer:/tmp/IdoNotExist < $tar_file
is "$output" 'Error: destination must be a directory when copying from stdin'
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
}
@@ -527,8 +694,7 @@ load helpers
fi
tar xvf $srcdir/stdout.tar -C $srcdir
- run cat $srcdir/file.txt
- is "$output" "$rand_content"
+ is "$(< $srcdir/file.txt)" "$rand_content"
run 1 ls $srcdir/empty.txt
rm -f $srcdir/*
@@ -539,11 +705,10 @@ load helpers
fi
tar xvf $srcdir/stdout.tar -C $srcdir
- run cat $srcdir/tmp/file.txt
- is "$output" "$rand_content"
- run cat $srcdir/tmp/empty.txt
- is "$output" ""
+ is "$(< $srcdir/tmp/file.txt)" "$rand_content"
+ is "$(< $srcdir/tmp/empty.txt)" ""
+ run_podman kill cpcontainer
run_podman rm -f cpcontainer
}
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 89f3f5c64..d413b0c10 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -168,6 +168,9 @@ EOF
CAT_SECRET="cat /run/secrets/$secret_filename"
fi
+ # For --dns-search: a domain that is unlikely to exist
+ local nosuchdomain=nx$(random_string 10).net
+
# Command to run on container startup with no args
cat >$tmpdir/mycmd <<EOF
#!/bin/sh
@@ -188,11 +191,17 @@ EOF
https_proxy=https-proxy-in-env-file
EOF
+ # Build args: one explicit (foo=bar), one implicit (foo)
+ local arg_implicit_value=implicit_$(random_string 15)
+ local arg_explicit_value=explicit_$(random_string 15)
+
# NOTE: it's important to not create the workdir.
# Podman will make sure to create a missing workdir
# if needed. See #9040.
cat >$tmpdir/Containerfile <<EOF
FROM $IMAGE
+ARG arg_explicit
+ARG arg_implicit
LABEL $label_name=$label_value
WORKDIR $workdir
@@ -217,18 +226,47 @@ RUN chown 2:3 /bin/mydefaultcmd
RUN $CAT_SECRET
+RUN echo explicit-build-arg=\$arg_explicit
+RUN echo implicit-build-arg=\$arg_implicit
+
CMD ["/bin/mydefaultcmd","$s_echo"]
+RUN cat /etc/resolv.conf
EOF
+ # The goal is to test that a missing value will be inherited from
+ # environment - but that can't work with remote, so for simplicity
+ # just make it explicit in that case too.
+ local build_arg_implicit="--build-arg arg_implicit"
+ if is_remote; then
+ build_arg_implicit+="=$arg_implicit_value"
+ fi
+
# cd to the dir, so we test relative paths (important for podman-remote)
cd $PODMAN_TMPDIR
+ export arg_explicit="THIS SHOULD BE OVERRIDDEN BY COMMAND LINE!"
+ export arg_implicit=${arg_implicit_value}
run_podman ${MOUNTS_CONF} build \
+ --build-arg arg_explicit=${arg_explicit_value} \
+ $build_arg_implicit \
+ --dns-search $nosuchdomain \
-t build_test -f build-test/Containerfile build-test
local iid="${lines[-1]}"
+ if [[ $output =~ missing.*build.argument ]]; then
+ die "podman did not see the given --build-arg(s)"
+ fi
+
# Make sure 'podman build' had the secret mounted
is "$output" ".*$secret_contents.*" "podman build has /run/secrets mounted"
+ # --build-arg should be set, both via 'foo=bar' and via just 'foo' ($foo)
+ is "$output" ".*explicit-build-arg=${arg_explicit_value}" \
+ "--build-arg arg_explicit=explicit-value works"
+ is "$output" ".*implicit-build-arg=${arg_implicit_value}" \
+ "--build-arg arg_implicit works (inheriting from environment)"
+ is "$output" ".*search $nosuchdomain" \
+ "--dns-search added to /etc/resolv.conf"
+
if is_remote; then
ENVHOST=""
else
@@ -305,8 +343,10 @@ Cmd[0] | /bin/mydefaultcmd
Cmd[1] | $s_echo
WorkingDir | $workdir
Labels.$label_name | $label_value
-Labels.\"io.buildah.version\" | $buildah_version
"
+ # FIXME: 2021-02-24: Fixed in buildah #3036; reenable this once podman
+ # vendors in a newer buildah!
+ # Labels.\"io.buildah.version\" | $buildah_version
parse_table "$tests" | while read field expect; do
actual=$(jq -r ".[0].Config.$field" <<<"$output")
@@ -360,6 +400,82 @@ Labels.\"io.buildah.version\" | $buildah_version
run_podman rmi -f build_test
}
+@test "podman build - COPY with ignore" {
+ local tmpdir=$PODMAN_TMPDIR/build-test-$(random_string 10)
+ mkdir -p $tmpdir/subdir
+
+ # Create a bunch of files. Declare this as an array to avoid duplication
+ # because we iterate over that list below, checking for each file.
+ # A leading "-" indicates that the file SHOULD NOT exist in the built image
+ local -a files=(
+ -test1 -test1.txt
+ test2 test2.txt
+ subdir/sub1 subdir/sub1.txt
+ -subdir/sub2 -subdir/sub2.txt
+ this-file-does-not-match-anything-in-ignore-file
+ comment
+ )
+ for f in ${files[@]}; do
+ # The magic '##-' strips off the '-' prefix
+ echo "$f" > $tmpdir/${f##-}
+ done
+
+ # Directory that doesn't exist in the image; COPY should create it
+ local newdir=/newdir-$(random_string 12)
+ cat >$tmpdir/Containerfile <<EOF
+FROM $IMAGE
+COPY ./ $newdir/
+EOF
+
+ # Run twice: first with a custom --ignorefile, then with a default one.
+ # This ordering is deliberate: if we were to run with .dockerignore
+ # first, and forget to rm it, and then run with --ignorefile, _and_
+ # there was a bug in podman where --ignorefile was a NOP (eg #9570),
+ # the test might pass because of the existence of .dockerignore.
+ for ignorefile in ignoreme-$(random_string 5) .dockerignore; do
+ # Patterns to ignore. Mostly copied from buildah/tests/bud/dockerignore
+ cat >$tmpdir/$ignorefile <<EOF
+# comment
+test*
+!test2*
+subdir
+!*/sub1*
+EOF
+
+ # Build an image. Pass --ignorefile explicitly unless this iteration is testing the default .dockerignore.
+ local -a ignoreflag
+ unset ignoreflag
+ if [[ $ignorefile != ".dockerignore" ]]; then
+ ignoreflag="--ignorefile $tmpdir/$ignorefile"
+ fi
+ run_podman build -t build_test ${ignoreflag} $tmpdir
+
+ # Delete the ignore file! Otherwise, in the next iteration of the loop,
+ # we could end up with an existing .dockerignore that invisibly
+ # takes precedence over --ignorefile
+ rm -f $tmpdir/$ignorefile
+
+ # It would be much more readable, and probably safer, to iterate
+ # over each file, running 'podman run ... ls -l $f'. But each podman run
+ # takes a second or so, and we are mindful of each second.
+ run_podman run --rm build_test find $newdir -type f
+ for f in ${files[@]}; do
+ if [[ $f =~ ^- ]]; then
+ f=${f##-}
+ if [[ $output =~ $f ]]; then
+ die "File '$f' found in image; it should have been ignored via $ignorefile"
+ fi
+ else
+ is "$output" ".*$newdir/$f" \
+ "File '$f' should exist in container (no match in $ignorefile)"
+ fi
+ done
+
+ # Clean up
+ run_podman rmi -f build_test
+ done
+}
+
@test "podman build - stdin test" {
# Random workdir, and random string to verify build output
workdir=/$(random_string 10)
diff --git a/test/system/120-load.bats b/test/system/120-load.bats
index 902cd9f5e..936449bdb 100644
--- a/test/system/120-load.bats
+++ b/test/system/120-load.bats
@@ -26,6 +26,13 @@ verify_iid_and_name() {
is "$new_img_name" "$1" "Name & tag of restored image"
}
+@test "podman load invalid file" {
+ # Regression test for #9672 to make sure invalid input yields errors.
+ invalid=$PODMAN_TMPDIR/invalid
+ echo "I am an invalid file and should cause a podman-load error" > $invalid
+ run_podman 125 load -i $invalid
+}
+
@test "podman save to pipe and load" {
# Generate a random name and tag (must be lower-case)
local random_name=x0$(random_string 12 | tr A-Z a-z)
diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats
index 7482d3e55..49743ff33 100644
--- a/test/system/410-selinux.bats
+++ b/test/system/410-selinux.bats
@@ -39,17 +39,17 @@ function check_label() {
}
@test "podman selinux: container with label=disable" {
- skip_if_rootless
-
check_label "--security-opt label=disable" "spc_t"
}
@test "podman selinux: privileged container" {
- skip_if_rootless
-
check_label "--privileged --userns=host" "spc_t"
}
+@test "podman selinux: init container" {
+ check_label "--systemd=always" "container_init_t"
+}
+
@test "podman selinux: pid=host" {
# FIXME FIXME FIXME: Remove these lines once all VMs have >= 2.146.0
# (this is ugly, but better than an unconditional skip)
@@ -74,6 +74,19 @@ function check_label() {
check_label "--security-opt label=level:s0:c1,c2" "container_t" "s0:c1,c2"
}
+@test "podman selinux: inspect kvm labels" {
+ skip_if_no_selinux
+ skip_if_remote "runtime flag is not passed over remote"
+
+ tmpdir=$PODMAN_TMPDIR/kata-test
+ mkdir -p $tmpdir
+ KATA=${tmpdir}/kata-runtime
+ ln -s /bin/true ${KATA}
+ run_podman create --runtime=${KATA} --name myc $IMAGE
+ run_podman inspect --format='{{ .ProcessLabel }}' myc
+ is "$output" ".*container_kvm_t"
+}
+
# pr #6752
@test "podman selinux: inspect multiple labels" {
skip_if_no_selinux
diff --git a/test/system/build-testimage b/test/system/build-testimage
index 53ade57f0..aac08e307 100755
--- a/test/system/build-testimage
+++ b/test/system/build-testimage
@@ -12,6 +12,9 @@
# still need a fedora image for that.
#
+# Buildah binary
+BUILDAH=${BUILDAH:-buildah}
+
# Tag for this new image
YMD=$(date +%Y%m%d)
@@ -58,7 +61,8 @@ chmod 755 pause
# - check for updates @ https://hub.docker.com/_/alpine
# busybox-extras provides httpd needed in 500-networking.bats
cat >Containerfile <<EOF
-FROM docker.io/library/alpine:3.12.0
+ARG ARCH=please-override-arch
+FROM docker.io/\${ARCH}/alpine:3.12.0
RUN apk add busybox-extras
ADD testimage-id pause /home/podman/
LABEL created_by=$create_script
@@ -69,26 +73,44 @@ EOF
# --squash-all : needed by 'tree' test in 070-build.bats
podman rmi -f testimage &> /dev/null || true
-podman build --squash-all -t testimage .
+
+# We need to use buildah because (as of 2021-02-23) only buildah has --manifest
+# and because Dan says arch emulation is not currently working on podman
+# (no further details).
+# Arch emulation on Fedora requires the qemu-user-static package.
+for arch in amd64 ppc64le s390x;do
+ ${BUILDAH} bud \
+ --arch=$arch \
+ --build-arg ARCH=$arch \
+ --manifest=testimage \
+ --squash \
+ .
+done
# Clean up
cd /tmp
rm -rf $tmpdir
-# Tag and push to quay.
-podman tag testimage quay.io/libpod/testimage:$YMD
-podman push quay.io/libpod/testimage:$YMD
+# Tag image and push (all arches) to quay.
+remote_tag=quay.io/libpod/testimage:$YMD
+podman tag testimage ${remote_tag}
+${BUILDAH} manifest push --all ${remote_tag} docker://${remote_tag}
-# Side note: there should always be a testimage tagged ':00000000'
-# (eight zeroes) in the same location; this is used by tests which
-# need to pull a non-locally-cached image. This image will rarely
-# if ever need to change, nor in fact does it even have to be a
-# copy of this testimage since all we use it for is 'true'.
+# Side note: there should always be a testimage tagged ':0000000<X>'
+# (eight digits, zero-padded sequence ID) in the same location; this is
+# used by tests which need to pull a non-locally-cached image. This
+# image will rarely if ever need to change, nor in fact does it even
+# have to be a copy of this testimage since all we use it for is 'true'.
+# However, it does need to be multiarch :-(
#
-# As of 2020-09-02 it is simply busybox, because it is super small:
+# As of 2021-02-24 it is simply busybox, because it is super small,
+# but it's complicated because of multiarch:
#
-# podman pull docker.io/library/busybox:1.32.0
-# podman tag docker.io/library/busybox:1.32.0 \
-# quay.io/libpod/testimage:00000000
-# podman push quay.io/libpod/testimage:00000000
+# img=quay.io/libpod/testimage:00000001
+# buildah manifest create $img
+# for arch in amd64 ppc64le s390x;do
+# buildah pull --arch $arch docker.io/$arch/busybox:1.32.0
+# buildah manifest add $img docker.io/$arch/busybox:1.32.0
+# done
+# buildah manifest push --all $img docker://$img
#
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 0572c6866..38e317709 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -7,9 +7,14 @@ PODMAN=${PODMAN:-podman}
PODMAN_TEST_IMAGE_REGISTRY=${PODMAN_TEST_IMAGE_REGISTRY:-"quay.io"}
PODMAN_TEST_IMAGE_USER=${PODMAN_TEST_IMAGE_USER:-"libpod"}
PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"testimage"}
-PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20200929"}
+PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20210223"}
PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG"
+# Remote image that we *DO NOT* fetch or keep by default; used for testing pull
+# This changed from 0 to 1 on 2021-02-24 due to multiarch considerations; it
+# should change only very rarely.
+PODMAN_NONLOCAL_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:00000001"
+
# Because who wants to spell that out each time?
IMAGE=$PODMAN_TEST_IMAGE_FQN
@@ -149,7 +154,7 @@ function run_podman() {
echo "$_LOG_PROMPT $PODMAN $*"
# BATS hangs if a subprocess remains and keeps FD 3 open; this happens
# if podman crashes unexpectedly without cleaning up subprocesses.
- run timeout --foreground -v --kill=10 $PODMAN_TIMEOUT $PODMAN "$@" 3>/dev/null
+ run timeout --foreground -v --kill=10 $PODMAN_TIMEOUT $PODMAN $_PODMAN_TEST_OPTS "$@" 3>/dev/null
# without "quotes", multiple lines are glommed together into one
if [ -n "$output" ]; then
echo "$output"
diff --git a/test/upgrade/README.md b/test/upgrade/README.md
new file mode 100644
index 000000000..2979a66d7
--- /dev/null
+++ b/test/upgrade/README.md
@@ -0,0 +1,87 @@
+Background
+==========
+
+For years we've needed a way to test podman upgrades; this
+became much more critical on December 7, 2020, when Matt disclosed
+a bug he had found over the weekend
+([#8613](https://github.com/containers/podman/issues/8613))
+in which reuse of a previously-defined field name would
+result in fatal JSON decode failures if current-podman were
+to try reading containers created with podman <= 1.8 (FIXME: confirm).
+
+Upgrade testing is a daunting problem, but in the December 12
+Cabal meeting Dan suggested using podman-in-podman. This PR
+is the result of fleshing out that idea.
+
+Overview
+========
+
+The BATS script in this directory fetches and runs an old-podman
+container image from quay.io/podman, uses it to create and run
+a number of containers, then uses new-podman to interact with
+those containers.
+
+As of 2021-02-23 the available old-podman versions are:
+
+```console
+$ ./bin/podman search --list-tags quay.io/podman/stable | awk '$2 ~ /^v/ { print $2}' | sort | column -c 75
+v1.4.2 v1.5.0 v1.6 v1.9.0 v2.0.2 v2.1.1
+v1.4.4 v1.5.1 v1.6.2 v1.9.1 v2.0.6 v2.2.1
+```
+
+Test invocation is:
+```console
+$ sudo env PODMAN=bin/podman PODMAN_UPGRADE_FROM=v1.9.0 PODMAN_UPGRADE_TEST_DEBUG= bats test/upgrade
+```
+(Path assumes you're cd'ed to top-level podman repo). `PODMAN_UPGRADE_FROM`
+can be any of the versions above. `PODMAN_UPGRADE_TEST_DEBUG` is empty
+here, but listed so you can set it `=1` and leave the podman_parent
+container running. Interacting with this container is left as an
+exercise for the reader.
+
+The script will pull the given podman image, invoke it with a scratch
+root directory, and have it do a small set of podman stuff (pull an
+image, create/run some containers). This podman process stays running
+because if it exits, it kills containers running inside the container.
+
+We then invoke the current (host-installed) podman, using the same
+scratch root directory, and perform operations on those images and
+containers. Most of those operations are done in individual @tests.
+
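+In practice the sharing boils down to both podmans being handed the same
+storage flags. A minimal sketch of the idea (the paths and container name
+here are illustrative only, not what the BATS script literally runs):
+
+```console
+$ workdir=/tmp/pu.XXXXXX     # the scratch dir created by the test
+$ opts="--root=$workdir/root --runroot=$workdir/runroot --tmpdir=$workdir/tmp"
+
+# inside the old-podman container, the setup script runs e.g.:
+#   podman $opts run -d --name myrunningcontainer $IMAGE httpd ...
+
+# on the host, the new podman points at the very same storage:
+$ podman $opts ps -a
+$ podman $opts exec myrunningcontainer cat /var/www/index.txt
+```
+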
+The goal is to have this upgrade test run in CI, iterating over a
+loop of known old versions. This list would need to be hand-maintained
+and updated on new releases. There might also need to be extra
+configuration defined, such as per-version commands (see below).
+
+Findings
+========
+
+Well, first, `v1.6.2` won't work on default f32/f33: the image
+does not include `crun`, so it can't work at all:
+
+ ERRO[0000] oci runtime "runc" does not support CGroups V2: use system migrate to mitigate
+
+I realize that it's kind of stupid not to test 1.6, since that's
+precisely the test that would've caught #8613 early, but I just
+don't think it's worth the hassle of setting up cgroupsv1 VMs.
+
+For posterity, in an earlier incarnation of this script I tried
+booting f32 into cgroupsv1 and ran into the following warnings
+when running new-podman on old-containers:
+```
+ERRO[0000] error joining network namespace for container 322b66d94640e31b2e6921565445cf0dade4ec13cabc16ee5f29292bdc038341: error retrieving network namespace at /var/run/netns/cni-577e2289-2c05-2e28-3c3d-002a5596e7da: failed to Statfs "/var/run/netns/cni-577e2289
+```
+
+Where To Go From Here
+=====================
+
+* Tests are still (2021-02-23) incomplete, with several failing outright.
+ See FIXMEs in the code.
+
+* Figuring out how/if to run rootless. I think this is possible, perhaps
+ even necessary, but will be tricky to get right because of home-directory
+ mounting.
+
+* Figuring out how/if to run variations with different config files
+ (e.g. running OLD-PODMAN that creates a user libpod.conf, tweaking
+ that in the test, then running NEW-PODMAN upgrade tests).
diff --git a/test/upgrade/helpers.bash b/test/upgrade/helpers.bash
new file mode 100644
index 000000000..41d9279e6
--- /dev/null
+++ b/test/upgrade/helpers.bash
@@ -0,0 +1,11 @@
+# -*- bash -*-
+
+load "../system/helpers"
+
+setup() {
+ :
+}
+
+teardown() {
+ :
+}
diff --git a/test/upgrade/test-upgrade.bats b/test/upgrade/test-upgrade.bats
new file mode 100644
index 000000000..dd827b398
--- /dev/null
+++ b/test/upgrade/test-upgrade.bats
@@ -0,0 +1,313 @@
+# -*- bats -*-
+
+load helpers
+
+# Create a var-lib-containers dir for this podman. We need to bind-mount
+# this into the container, and use --root and --runroot and --tmpdir
+# options both in the container podman and out here: that's the only
+# way to share image and container storage.
+if [ -z "${PODMAN_UPGRADE_WORKDIR}" ]; then
+ # Much as I'd love a descriptive name like "podman-upgrade-tests.XXXXX",
+ # keep it short ("pu") because of the 100-character path length limit
+ # for UNIX sockets (needed by conmon)
+ export PODMAN_UPGRADE_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} pu.XXXXXX)
+
+ touch $PODMAN_UPGRADE_WORKDIR/status
+fi
+
+# Generate a set of random strings used for content verification
+if [ -z "${RANDOM_STRING_1}" ]; then
+ export RANDOM_STRING_1=$(random_string 15)
+ export LABEL_CREATED=$(random_string 16)
+ export LABEL_FAILED=$(random_string 17)
+ export LABEL_RUNNING=$(random_string 18)
+
+ # FIXME: randomize this
+ HOST_PORT=34567
+fi
+
+# Version string of the podman we're actually testing, e.g. '3.0.0-dev-d1a26013'
+PODMAN_VERSION=$($PODMAN version |awk '/^Version:/ { V=$2 } /^Git Commit:/ { G=$3 } END { print V "-" substr(G,0,8) }')
+
+setup() {
+ skip_if_rootless
+
+ # The podman-in-podman image (old podman)
+ if [[ -z "$PODMAN_UPGRADE_FROM" ]]; then
+ echo "# \$PODMAN_UPGRADE_FROM is undefined (should be e.g. v1.9.0)" >&3
+ false
+ fi
+
+ if [ "$(< $PODMAN_UPGRADE_WORKDIR/status)" = "failed" ]; then
+ # FIXME: exit instead?
+ echo "*** setup failed - no point in running tests"
+ false
+ fi
+
+ export _PODMAN_TEST_OPTS="--root=$PODMAN_UPGRADE_WORKDIR/root --runroot=$PODMAN_UPGRADE_WORKDIR/runroot --tmpdir=$PODMAN_UPGRADE_WORKDIR/tmp"
+}
+
+###############################################################################
+# BEGIN setup
+
+@test "initial setup: start $PODMAN_UPGRADE_FROM containers" {
+ echo failed >| $PODMAN_UPGRADE_WORKDIR/status
+
+ OLD_PODMAN=quay.io/podman/stable:$PODMAN_UPGRADE_FROM
+ $PODMAN pull $OLD_PODMAN
+
+ # Shortcut name, because we're referencing it a lot
+ pmroot=$PODMAN_UPGRADE_WORKDIR
+
+ # WWW content to share
+ mkdir -p $pmroot/var/www
+ echo $RANDOM_STRING_1 >$pmroot/var/www/index.txt
+
+ # podman tmpdir
+ mkdir -p $pmroot/tmp
+
+ #
+ # Script to run >>OLD<< podman commands.
+ #
+ # These commands will be run inside a podman container. The "podman"
+ # command in this script will be the desired old-podman version.
+ #
+ pmscript=$pmroot/setup
+ cat >| $pmscript <<EOF
+#!/bin/bash
+
+# cgroup-manager=systemd does not work inside a container
+opts="--cgroup-manager=cgroupfs --events-backend=file $_PODMAN_TEST_OPTS"
+
+set -ex
+
+# Try try again, because network flakiness makes this a point of failure
+podman \$opts pull $IMAGE \
+ || (sleep 10; podman \$opts pull $IMAGE) \
+ || (sleep 30; podman \$opts pull $IMAGE)
+
+
+podman \$opts create --name mycreatedcontainer --label mylabel=$LABEL_CREATED \
+ $IMAGE false
+
+podman \$opts run --name mydonecontainer $IMAGE echo ++$RANDOM_STRING_1++
+
+podman \$opts run --name myfailedcontainer --label mylabel=$LABEL_FAILED \
+ $IMAGE sh -c 'exit 17' || true
+
+# FIXME: add "-p $HOST_PORT:80"
+# ...I tried and tried, and could not get this to work. I could never
+# connect to the port from the host, nor even from the podman_parent
+# container; I could never see the port listed in 'ps' nor 'inspect'.
+# And, finally, I ended up in a state where the container wouldn't
+# even start, and via complicated 'podman logs' found out:
+# httpd: bind: Address in use
+# So I just give up for now.
+#
+podman \$opts run -d --name myrunningcontainer --label mylabel=$LABEL_RUNNING \
+ -v $pmroot/var/www:/var/www \
+ -w /var/www \
+ $IMAGE /bin/busybox-extras httpd -f -p 80
+
+echo READY
+while :;do
+ if [ -e /stop ]; then
+ echo STOPPING
+ podman \$opts stop -t 0 myrunningcontainer || true
+ podman \$opts rm -f myrunningcontainer || true
+ exit 0
+ fi
+ sleep 0.5
+done
+EOF
+ chmod 555 $pmscript
+
+ # Clean up vestiges of previous run
+ $PODMAN rm -f podman_parent || true
+
+ # Not entirely a NOP! This is just so we get /run/crun created on a CI VM
+ $PODMAN run --rm $OLD_PODMAN true
+
+ #
+ # Use new-podman to run the above script under old-podman.
+ #
+ # DO NOT USE run_podman HERE! That would use $_PODMAN_TEST_OPTS
+ # and would write into our shared test dir, which would then
+ # pollute it for use by old-podman. We must keep that pristine
+ # so old-podman is the first to write to it.
+ #
+ $PODMAN run -d --name podman_parent --pid=host \
+ --privileged \
+ --net=host \
+ --cgroupns=host \
+ -v /dev/fuse:/dev/fuse \
+ -v /run/crun:/run/crun \
+ -v $pmroot:$pmroot \
+ $OLD_PODMAN $pmroot/setup
+
+ _PODMAN_TEST_OPTS= wait_for_ready podman_parent
+
+ echo OK >| $PODMAN_UPGRADE_WORKDIR/status
+}
+
+# END setup
+###############################################################################
+# BEGIN actual tests
+
+# This is a NOP; used only so the version string will show up in logs
+@test "upgrade: $PODMAN_UPGRADE_FROM -> $PODMAN_VERSION" {
+ :
+}
+
+@test "images" {
+ run_podman images -a --format '{{.Names}}'
+ is "$output" "\[$IMAGE\]" "podman images"
+}
+
+@test "ps : one container running" {
+ run_podman ps --format '{{.Image}}--{{.Names}}'
+ is "$output" "$IMAGE--myrunningcontainer" "ps: one container running"
+}
+
+@test "ps -a : shows all containers" {
+ # IMPORTANT: we can't use --sort=created, because that requires #8427
+ # on the *creating* podman end.
+ run_podman ps -a \
+ --format '{{.Names}}--{{.Status}}--{{.Ports}}--{{.Labels.mylabel}}' \
+ --sort=names
+ is "${lines[0]}" "mycreatedcontainer--Created----$LABEL_CREATED" "created"
+ is "${lines[1]}" "mydonecontainer--Exited (0).*----<no value>" "done"
+ is "${lines[2]}" "myfailedcontainer--Exited (17) .*----$LABEL_FAILED" "fail"
+ is "${lines[3]}" "myrunningcontainer--Up .*----$LABEL_RUNNING" "running"
+
+ # For debugging: dump containers and IDs
+ if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
+ run_podman ps -a
+ for l in "${lines[@]}"; do
+ echo "# $l" >&3
+ done
+ fi
+}
+
+
+@test "inspect - all container status" {
+ tests="
+running | running | 0
+created | configured | 0
+done | exited | 0
+failed | exited | 17
+"
+ while read cname state exitstatus; do
+ run_podman inspect --format '{{.State.Status}}--{{.State.ExitCode}}' my${cname}container
+ is "$output" "$state--$exitstatus" "status of my${cname}container"
+ done < <(parse_table "$tests")
+}
+
+@test "logs" {
+ run_podman logs mydonecontainer
+ is "$output" "++$RANDOM_STRING_1++" "podman logs on stopped container"
+
+# run_podman logs myrunningcontainer
+# is "$output" "READY" "podman logs on running container"
+}
+
+@test "exec" {
+ run_podman exec myrunningcontainer cat /var/www/index.txt
+ is "$output" "$RANDOM_STRING_1" "exec into myrunningcontainer"
+}
+
+@test "load" {
+ # FIXME, is this really necessary?
+ skip "TBI. Not sure if there's any point to this."
+}
+
+@test "mount" {
+ skip "TBI"
+}
+
+@test "pods" {
+ skip "TBI"
+}
+
+# FIXME: commit? kill? network? pause? restart? top? volumes? What else?
+
+
+@test "start" {
+ skip "FIXME: this leaves a mount behind: root/overlay/sha/merged"
+ run_podman --cgroup-manager=cgroupfs start -a mydonecontainer
+ is "$output" "++$RANDOM_STRING_1++" "start on already-run container"
+}
+
+@test "rm a stopped container" {
+ # FIXME FIXME FIXME!
+ #
+ # I have no idea what's going on here. For most of my testing in this
+ # section, the code here was simply 'podman rm myfailedcontainer', and
+ # it would succeed, but then way down, in 'cleanup' below, the 'rm -f'
+ # step would fail:
+ #
+ # # podman rm -f podman_parent
+ # error freeing lock for container <sha>: no such file or directory
+ # ...where <sha> is the ID of the podman_parent container.
+ #
+ # I started playing with this section, by adding 'rm mydonecontainer',
+ # and now it always fails, the same way, but with the container we're
+ # removing right here:
+ #
+ # error freeing lock for container <sha>: no such file or directory
+ # ...where <sha> is the ID of mydonecontainer.
+ #
+ # I don't know. I give up for now, and am skip'ing the whole thing.
+ # If you want to play with it, try commenting out the 'myfailed' lines,
+ # or just the 'mydone' ones, or, I don't know.
+ skip "FIXME: error freeing lock for container <sha>: no such file or dir"
+
+ # For debugging, so we can see what 'error freeing lock' refers to
+ run_podman ps -a
+
+ run_podman rm myfailedcontainer
+ is "$output" "[0-9a-f]\\{64\\}" "podman rm myfailedcontainer"
+
+ run_podman rm mydonecontainer
+ is "$output" "[0-9a-f]\\{64\\}" "podman rm mydonecontainer"
+}
+
+
+@test "stop and rm" {
+ # About a ten-second pause, then:
+ # Error: timed out waiting for file /tmp/pu.nf747w/tmp/exits/<sha>: internal libpod error
+ # It doesn't seem to be a socket-length issue: the paths are ~80-88 chars.
+ # Leaving podman_parent running, and exec'ing into it, it doesn't look
+ # like the file is being written to the wrong place.
+ skip "FIXME: this doesn't work: timed out waiting for file tmpdir/exits/sha"
+ run_podman stop myrunningcontainer
+ run_podman rm myrunningcontainer
+}
+
+@test "clean up parent" {
+ if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
+ skip "workdir is $PODMAN_UPGRADE_WORKDIR"
+ fi
+
+ # We're done with shared environment. By clearing this, we can now
+ # use run_podman for actions on the podman_parent container
+ unset _PODMAN_TEST_OPTS
+
+ # (Useful for debugging the 'rm -f' step below, which, when it fails, only
+ # gives a container ID. This 'ps' confirms that the CID is podman_parent)
+ run_podman ps -a
+
+ # Stop the container gracefully
+ run_podman exec podman_parent touch /stop
+ run_podman wait podman_parent
+
+ run_podman logs podman_parent
+ run_podman rm -f podman_parent
+
+ # FIXME: why does this remain mounted?
+ umount $PODMAN_UPGRADE_WORKDIR/root/overlay || true
+
+ rm -rf $PODMAN_UPGRADE_WORKDIR
+}
+
+# FIXME: now clean up
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE
index f0027349e..8dada3eda 100644
--- a/vendor/github.com/opentracing/opentracing-go/LICENSE
+++ b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2016 The OpenTracing Authors
+ Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go
new file mode 100644
index 000000000..1c74903ad
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: Apache-2.0
+
+package metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "time"
+
+ cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+type CheckpointedPod struct {
+ PodUID string `json:"io.kubernetes.pod.uid,omitempty"`
+ ID string `json:"SandboxID,omitempty"`
+ Name string `json:"io.kubernetes.pod.name,omitempty"`
+ TerminationGracePeriod int64 `json:"io.kubernetes.pod.terminationGracePeriod,omitempty"`
+ Namespace string `json:"io.kubernetes.pod.namespace,omitempty"`
+ ConfigSource string `json:"kubernetes.io/config.source,omitempty"`
+ ConfigSeen string `json:"kubernetes.io/config.seen,omitempty"`
+ Manager string `json:"io.container.manager,omitempty"`
+ Containers []CheckpointedContainer `json:"Containers"`
+ HostIP string `json:"hostIP,omitempty"`
+ PodIP string `json:"podIP,omitempty"`
+ PodIPs []string `json:"podIPs,omitempty"`
+}
+
+type CheckpointedContainer struct {
+ Name string `json:"io.kubernetes.container.name,omitempty"`
+ ID string `json:"id,omitempty"`
+ TerminationMessagePath string `json:"io.kubernetes.container.terminationMessagePath,omitempty"`
+ TerminationMessagePolicy string `json:"io.kubernetes.container.terminationMessagePolicy,omitempty"`
+ RestartCounter int32 `json:"io.kubernetes.container.restartCount,omitempty"`
+ TerminationMessagePathUID string `json:"terminationMessagePathUID,omitempty"`
+ Image string `json:"Image"`
+}
+
+type CheckpointMetadata struct {
+ Version int `json:"version"`
+ CheckpointedPods []CheckpointedPod
+}
+
+const (
+ // kubelet archive
+ CheckpointedPodsFile = "checkpointed.pods"
+ // container archive
+ ConfigDumpFile = "config.dump"
+ SpecDumpFile = "spec.dump"
+ NetworkStatusFile = "network.status"
+ CheckpointDirectory = "checkpoint"
+ RootFsDiffTar = "rootfs-diff.tar"
+ DeletedFilesFile = "deleted.files"
+ // pod archive
+ PodOptionsFile = "pod.options"
+ PodDumpFile = "pod.dump"
+)
+
+type CheckpointType int
+
+const (
+ // The checkpoint archive contains a kubelet checkpoint
+ // One or multiple pods and kubelet metadata (checkpointed.pods)
+ Kubelet CheckpointType = iota
+ // The checkpoint archive contains one pod including one or multiple containers
+ Pod
+ // The checkpoint archive contains a single container
+ Container
+ Unknown
+)
+
+// This is a reduced copy of what Podman uses to store checkpoint metadata
+type ContainerConfig struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ RootfsImageName string `json:"rootfsImageName,omitempty"`
+ OCIRuntime string `json:"runtime,omitempty"`
+ CreatedTime time.Time `json:"createdTime"`
+}
+
+// This is metadata stored inside of a Pod checkpoint archive
+type CheckpointedPodOptions struct {
+ Version int `json:"version"`
+ Containers []string `json:"containers,omitempty"`
+ MountLabel string `json:"mountLabel"`
+ ProcessLabel string `json:"processLabel"`
+}
+
+func DetectCheckpointArchiveType(checkpointDirectory string) (CheckpointType, error) {
+ _, err := os.Stat(filepath.Join(checkpointDirectory, CheckpointedPodsFile))
+ if err != nil && !os.IsNotExist(err) {
+ return Unknown, errors.Wrapf(err, "Failed to access %q\n", CheckpointedPodsFile)
+ }
+ if os.IsNotExist(err) {
+ return Container, nil
+ }
+
+ return Kubelet, nil
+}
+
+func ReadContainerCheckpointSpecDump(checkpointDirectory string) (*spec.Spec, string, error) {
+ var specDump spec.Spec
+ specDumpFile, err := ReadJSONFile(&specDump, checkpointDirectory, SpecDumpFile)
+
+ return &specDump, specDumpFile, err
+}
+
+func ReadContainerCheckpointConfigDump(checkpointDirectory string) (*ContainerConfig, string, error) {
+ var containerConfig ContainerConfig
+ configDumpFile, err := ReadJSONFile(&containerConfig, checkpointDirectory, ConfigDumpFile)
+
+ return &containerConfig, configDumpFile, err
+}
+
+func ReadContainerCheckpointDeletedFiles(checkpointDirectory string) ([]string, string, error) {
+ var deletedFiles []string
+ deletedFilesFile, err := ReadJSONFile(&deletedFiles, checkpointDirectory, DeletedFilesFile)
+
+ return deletedFiles, deletedFilesFile, err
+}
+
+func ReadContainerCheckpointNetworkStatus(checkpointDirectory string) ([]*cnitypes.Result, string, error) {
+ var networkStatus []*cnitypes.Result
+ networkStatusFile, err := ReadJSONFile(&networkStatus, checkpointDirectory, NetworkStatusFile)
+
+ return networkStatus, networkStatusFile, err
+}
+
+func ReadKubeletCheckpoints(checkpointsDirectory string) (*CheckpointMetadata, string, error) {
+ var checkpointMetadata CheckpointMetadata
+ checkpointMetadataPath, err := ReadJSONFile(&checkpointMetadata, checkpointsDirectory, CheckpointedPodsFile)
+
+ return &checkpointMetadata, checkpointMetadataPath, err
+}
+
+func GetIPFromNetworkStatus(networkStatus []*cnitypes.Result) net.IP {
+ if len(networkStatus) == 0 {
+ return nil
+ }
+ // Take the first IP address
+ if len(networkStatus[0].IPs) == 0 {
+ return nil
+ }
+ IP := networkStatus[0].IPs[0].Address.IP
+
+ return IP
+}
+
+func GetMACFromNetworkStatus(networkStatus []*cnitypes.Result) net.HardwareAddr {
+ if len(networkStatus) == 0 {
+ return nil
+ }
+ // Take the first device with a defined sandbox
+ if len(networkStatus[0].Interfaces) == 0 {
+ return nil
+ }
+ var MAC net.HardwareAddr
+ MAC = nil
+ for _, n := range networkStatus[0].Interfaces {
+ if n.Sandbox != "" {
+ MAC, _ = net.ParseMAC(n.Mac)
+
+ break
+ }
+ }
+
+ return MAC
+}
+
+// WriteJSONFile marshals the given data and writes it to a JSON file
+func WriteJSONFile(v interface{}, dir, file string) (string, error) {
+ fileJSON, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", errors.Wrapf(err, "Error marshalling JSON")
+ }
+ file = filepath.Join(dir, file)
+ if err := ioutil.WriteFile(file, fileJSON, 0o600); err != nil {
+ return "", errors.Wrapf(err, "Error writing to %q", file)
+ }
+
+ return file, nil
+}
+
+func ReadJSONFile(v interface{}, dir, file string) (string, error) {
+ file = filepath.Join(dir, file)
+ content, err := ioutil.ReadFile(file)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to read %s", file)
+ }
+ if err = json.Unmarshal(content, v); err != nil {
+ return "", errors.Wrapf(err, "failed to unmarshal %s", file)
+ }
+
+ return file, nil
+}
+
+func WriteKubeletCheckpointsMetadata(checkpointMetadata *CheckpointMetadata, dir string) error {
+ _, err := WriteJSONFile(checkpointMetadata, dir, CheckpointedPodsFile)
+
+ return err
+}
+
+func ByteToString(b int64) string {
+ const unit = 1024
+ if b < unit {
+ return fmt.Sprintf("%d B", b)
+ }
+ div, exp := int64(unit), 0
+ for n := b / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.1f %ciB",
+ float64(b)/float64(div), "KMGTPE"[exp])
+}
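
The file above is a plain JSON-helper library keyed on the well-known file names inside a checkpoint archive. As a minimal sketch (not part of this patch; the directory path and the package-main wrapper are made up for illustration), a consumer might detect the archive type and read the container config roughly like this:

package main

import (
	"fmt"
	"log"

	metadata "github.com/checkpoint-restore/checkpointctl/lib"
)

func main() {
	// Hypothetical path to an already-unpacked checkpoint archive.
	dir := "/tmp/extracted-checkpoint"

	kind, err := metadata.DetectCheckpointArchiveType(dir)
	if err != nil {
		log.Fatal(err)
	}
	if kind != metadata.Container {
		log.Fatalf("expected a container archive, got type %d", kind)
	}

	// config.dump holds the reduced Podman ContainerConfig defined above.
	cfg, cfgFile, err := metadata.ReadContainerCheckpointConfigDump(dir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("container %s (%s), runtime %s, read from %s\n",
		cfg.Name, cfg.ID, cfg.OCIRuntime, cfgFile)
}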
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 0903fc7db..cd466ccb3 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -324,13 +324,33 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
}
- // Copy each source in turn.
+ // Make sure that, if it's a symlink, we'll chroot to the target of the link;
+ // knowing that target requires that we resolve it within the chroot.
+ evalOptions := copier.EvalOptions{}
+ evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
+ if err != nil {
+ return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
+ }
+ extractDirectory = evaluated
+
+ // Set up ID maps.
var srcUIDMap, srcGIDMap []idtools.IDMap
if options.IDMappingOptions != nil {
srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
}
destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
+ // Create the target directory if it doesn't exist yet.
+ mkdirOptions := copier.MkdirOptions{
+ UIDMap: destUIDMap,
+ GIDMap: destGIDMap,
+ ChownNew: chownDirs,
+ }
+ if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
+ return errors.Wrapf(err, "error ensuring target directory exists")
+ }
+
+ // Copy each source in turn.
for _, src := range sources {
var multiErr *multierror.Error
var getErr, closeErr, renameErr, putErr error
@@ -363,7 +383,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodFiles: nil,
IgnoreDevices: rsystem.RunningInUserNS(),
}
- putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+ putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
@@ -498,7 +518,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodFiles: nil,
IgnoreDevices: rsystem.RunningInUserNS(),
}
- putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+ putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
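
The reordered Add() flow above is: resolve the destination with copier.Eval so a symlinked destination is chased inside the container root, ensure it exists with copier.Mkdir using the destination ID maps, and only then Put content relative to the evaluated path. A condensed sketch of that sequence; the helper name is hypothetical and the parameter and ChownNew field types are assumed from the surrounding buildah code rather than shown in this hunk:

package example

import (
	"github.com/containers/buildah/copier"
	"github.com/containers/storage/pkg/idtools"
	"github.com/pkg/errors"
)

// ensureDest mirrors the new ordering in Builder.Add: evaluate the destination
// inside the container root, then make sure it exists with the destination ID
// maps; copier.Put is afterwards called relative to the evaluated directory.
func ensureDest(mountPoint, dest string, uidMap, gidMap []idtools.IDMap, chown *idtools.IDPair) (string, error) {
	evaluated, err := copier.Eval(mountPoint, dest, copier.EvalOptions{})
	if err != nil {
		return "", errors.Wrapf(err, "error checking on destination %v", dest)
	}
	if err := copier.Mkdir(mountPoint, evaluated, copier.MkdirOptions{
		UIDMap:   uidMap,
		GIDMap:   gidMap,
		ChownNew: chown,
	}); err != nil {
		return "", errors.Wrap(err, "error ensuring target directory exists")
	}
	return evaluated, nil
}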
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index dd43ea99a..427950c5c 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.19.6"
+ Version = "1.19.8"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index 63cdb1974..52d8133c7 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -70,6 +70,7 @@ func isArchivePath(path string) bool {
type requestType string
const (
+ requestEval requestType = "EVAL"
requestStat requestType = "STAT"
requestGet requestType = "GET"
requestPut requestType = "PUT"
@@ -95,6 +96,8 @@ type request struct {
func (req *request) Excludes() []string {
switch req.Request {
+ case requestEval:
+ return nil
case requestStat:
return req.StatOptions.Excludes
case requestGet:
@@ -112,6 +115,8 @@ func (req *request) Excludes() []string {
func (req *request) UIDMap() []idtools.IDMap {
switch req.Request {
+ case requestEval:
+ return nil
case requestStat:
return nil
case requestGet:
@@ -129,6 +134,8 @@ func (req *request) UIDMap() []idtools.IDMap {
func (req *request) GIDMap() []idtools.IDMap {
switch req.Request {
+ case requestEval:
+ return nil
case requestStat:
return nil
case requestGet:
@@ -148,6 +155,7 @@ func (req *request) GIDMap() []idtools.IDMap {
type response struct {
Error string `json:",omitempty"`
Stat statResponse
+ Eval evalResponse
Get getResponse
Put putResponse
Mkdir mkdirResponse
@@ -158,6 +166,11 @@ type statResponse struct {
Globs []*StatsForGlob
}
+// evalResponse encodes a response for a single Eval request.
+type evalResponse struct {
+ Evaluated string
+}
+
// StatsForGlob encodes results for a single glob pattern passed to Stat().
type StatsForGlob struct {
Error string `json:",omitempty"` // error if the Glob pattern was malformed
@@ -192,6 +205,33 @@ type putResponse struct {
type mkdirResponse struct {
}
+// EvalOptions controls parts of Eval()'s behavior.
+type EvalOptions struct {
+}
+
+// Eval evaluates the directory's path, including any intermediate symbolic
+// links.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, evaluation is performed in a chrooted context.
+// If the directory is specified as an absolute path, it should either be the
+// root directory or a subdirectory of the root directory. Otherwise, the
+// directory is treated as a path relative to the root directory.
+func Eval(root string, directory string, options EvalOptions) (string, error) {
+ req := request{
+ Request: requestEval,
+ Root: root,
+ Directory: directory,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return "", err
+ }
+ if resp.Error != "" {
+ return "", errors.New(resp.Error)
+ }
+ return resp.Eval.Evaluated, nil
+}
+
// StatOptions controls parts of Stat()'s behavior.
type StatOptions struct {
CheckForArchives bool // check for and populate the IsArchive bit in returned values
@@ -243,6 +283,8 @@ type GetOptions struct {
StripXattrs bool // don't record extended attributes of items being copied. no effect on archives being extracted
KeepDirectoryNames bool // don't strip the top directory's basename from the paths of items in subdirectories
Rename map[string]string // rename items with the specified names, or under the specified names
+ NoDerefSymlinks bool // don't follow symlinks when globs match them
+ IgnoreUnreadable bool // ignore errors reading items, instead of returning an error
}
// Get produces an archive containing items that match the specified glob
@@ -557,6 +599,9 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
return killAndReturn(err, "error encoding request for copier subprocess")
}
if err = decoder.Decode(&resp); err != nil {
+ if errors.Is(err, io.EOF) && errorBuffer.Len() > 0 {
+ return killAndReturn(errors.New(errorBuffer.String()), "error in copier subprocess")
+ }
return killAndReturn(err, "error decoding response from copier subprocess")
}
if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
@@ -667,7 +712,7 @@ func copierMain() {
var err error
chrooted, err = chroot(req.Root)
if err != nil {
- fmt.Fprintf(os.Stderr, "error changing to intended-new-root directory %q: %v", req.Root, err)
+ fmt.Fprintf(os.Stderr, "%v", err)
os.Exit(1)
}
}
@@ -762,6 +807,9 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
switch req.Request {
default:
return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
+ case requestEval:
+ resp := copierHandlerEval(req)
+ return resp, nil, nil
case requestStat:
resp := copierHandlerStat(req, pm)
return resp, nil, nil
@@ -870,6 +918,17 @@ func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error
return workingPath, nil
}
+func copierHandlerEval(req request) *response {
+ errorResponse := func(fmtspec string, args ...interface{}) *response {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Eval: evalResponse{}}
+ }
+ resolvedTarget, err := resolvePath(req.Root, req.Directory, nil)
+ if err != nil {
+ return errorResponse("copier: eval: error resolving %q: %v", req.Directory, err)
+ }
+ return &response{Eval: evalResponse{Evaluated: filepath.Join(req.rootPrefix, resolvedTarget)}}
+}
+
func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
errorResponse := func(fmtspec string, args ...interface{}) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
@@ -977,6 +1036,14 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
return &response{Stat: statResponse{Globs: stats}}
}
+func errorIsPermission(err error) bool {
+ err = errors.Cause(err)
+ if err == nil {
+ return false
+ }
+ return os.IsPermission(err) || strings.Contains(err.Error(), "permission denied")
+}
+
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
statRequest := req
statRequest.Request = requestStat
@@ -1024,7 +1091,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
// chase links. if we hit a dead end, we should just fail
followedLinks := 0
const maxFollowedLinks = 16
- for info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
+ for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
path, err := os.Readlink(item)
if err != nil {
continue
@@ -1053,6 +1120,12 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
options.ExpandArchives = false
walkfn := func(path string, info os.FileInfo, err error) error {
if err != nil {
+ if options.IgnoreUnreadable && errorIsPermission(err) {
+ if info != nil && info.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
return errors.Wrapf(err, "copier: get: error reading %q", path)
}
// compute the path of this item
@@ -1092,7 +1165,13 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
symlinkTarget = target
}
// add the item to the outgoing tar stream
- return copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings)
+ if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
+ if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
+ return nil
+ }
+ return err
+ }
+ return nil
}
// walk the directory tree, checking/adding items individually
if err := filepath.Walk(item, walkfn); err != nil {
@@ -1112,6 +1191,9 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
// dereferenced, be sure to use the name of the
// link.
if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+ if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
+ continue
+ }
return errors.Wrapf(err, "copier: get: %q", queue[i])
}
itemsCopied++
@@ -1139,7 +1221,8 @@ func handleRename(rename map[string]string, name string) string {
return path.Join(mappedPrefix, remainder)
}
if prefix[len(prefix)-1] == '/' {
- if mappedPrefix, ok := rename[prefix[:len(prefix)-1]]; ok {
+ prefix = prefix[:len(prefix)-1]
+ if mappedPrefix, ok := rename[prefix]; ok {
return path.Join(mappedPrefix, remainder)
}
}
@@ -1191,7 +1274,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.ExpandArchives && isArchivePath(contentPath) {
f, err := os.Open(contentPath)
if err != nil {
- return errors.Wrapf(err, "error opening %s", contentPath)
+ return errors.Wrapf(err, "error opening file for reading archive contents")
}
defer f.Close()
rc, _, err := compression.AutoDecompress(f)
@@ -1262,17 +1345,21 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
hdr.Mode = int64(*options.ChmodFiles)
}
}
+ var f *os.File
+ if hdr.Typeflag == tar.TypeReg {
+ // open the file first so that we don't write a header for it if we can't actually read it
+ f, err = os.Open(contentPath)
+ if err != nil {
+ return errors.Wrapf(err, "error opening file for adding its contents to archive")
+ }
+ defer f.Close()
+ }
// output the header
if err = tw.WriteHeader(hdr); err != nil {
return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
}
if hdr.Typeflag == tar.TypeReg {
// output the content
- f, err := os.Open(contentPath)
- if err != nil {
- return errors.Wrapf(err, "error opening %s", contentPath)
- }
- defer f.Close()
n, err := io.Copy(tw, f)
if err != nil {
return errors.Wrapf(err, "error copying %s", contentPath)
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
index 2c2806d0a..aa40f327c 100644
--- a/vendor/github.com/containers/buildah/copier/syscall_unix.go
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -3,10 +3,10 @@
package copier
import (
- "fmt"
"os"
"time"
+ "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -15,13 +15,13 @@ var canChroot = os.Getuid() == 0
func chroot(root string) (bool, error) {
if canChroot {
if err := os.Chdir(root); err != nil {
- return false, fmt.Errorf("error changing to intended-new-root directory %q: %v", root, err)
+ return false, errors.Wrapf(err, "error changing to intended-new-root directory %q", root)
}
if err := unix.Chroot(root); err != nil {
- return false, fmt.Errorf("error chrooting to directory %q: %v", root, err)
+ return false, errors.Wrapf(err, "error chrooting to directory %q", root)
}
if err := os.Chdir(string(os.PathSeparator)); err != nil {
- return false, fmt.Errorf("error changing to just-became-root directory %q: %v", root, err)
+ return false, errors.Wrapf(err, "error changing to just-became-root directory %q", root)
}
return true, nil
}
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index a3e5866ee..462561983 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -77,13 +77,11 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
// Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work")
upperDir := filepath.Join(contentDir, "upper")
- st, err := os.Stat(dest)
- if err == nil {
- if err := os.Chmod(upperDir, st.Mode()); err != nil {
- return mount, err
- }
+ st, err := os.Stat(source)
+ if err != nil {
+ return mount, err
}
- if !os.IsNotExist(err) {
+ if err := os.Chmod(upperDir, st.Mode()); err != nil {
return mount, err
}
overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
index 1074275b0..8d6f68906 100644
--- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go
+++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
@@ -31,7 +31,6 @@ type Error struct {
exec.ExitError
cmd exec.Cmd
msg string
- proto Protocol
exitStatus *int //for overriding
}
@@ -51,9 +50,8 @@ func (e *Error) IsNotExist() bool {
if e.ExitStatus() != 1 {
return false
}
- cmdIptables := getIptablesCommand(e.proto)
- msgNoRuleExist := fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", cmdIptables)
- msgNoChainExist := fmt.Sprintf("%s: No chain/target/match by that name.\n", cmdIptables)
+ msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n"
+ msgNoChainExist := "No chain/target/match by that name.\n"
return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist)
}
@@ -75,6 +73,7 @@ type IPTables struct {
v2 int
v3 int
mode string // the underlying iptables operating mode, e.g. nf_tables
+ timeout int // time to wait for the iptables lock, default waits forever
}
// Stat represents a structured statistic entry.
@@ -91,19 +90,42 @@ type Stat struct {
Options string `json:"options"`
}
-// New creates a new IPTables.
-// For backwards compatibility, this always uses IPv4, i.e. "iptables".
-func New() (*IPTables, error) {
- return NewWithProtocol(ProtocolIPv4)
+type option func(*IPTables)
+
+func IPFamily(proto Protocol) option {
+ return func(ipt *IPTables) {
+ ipt.proto = proto
+ }
}
-// New creates a new IPTables for the given proto.
-// The proto will determine which command is used, either "iptables" or "ip6tables".
-func NewWithProtocol(proto Protocol) (*IPTables, error) {
- path, err := exec.LookPath(getIptablesCommand(proto))
+func Timeout(timeout int) option {
+ return func(ipt *IPTables) {
+ ipt.timeout = timeout
+ }
+}
+
+// New creates a new IPTables configured with the options passed as parameters.
+// For backwards compatibility, it defaults to IPv4 and a timeout of 0 (wait forever).
+// For example, an IPv6 IPTables with a 5-second timeout can be created by passing
+// the IPFamily and Timeout options as follows:
+// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5))
+func New(opts ...option) (*IPTables, error) {
+
+ ipt := &IPTables{
+ proto: ProtocolIPv4,
+ timeout: 0,
+ }
+
+ for _, opt := range opts {
+ opt(ipt)
+ }
+
+ path, err := exec.LookPath(getIptablesCommand(ipt.proto))
if err != nil {
return nil, err
}
+ ipt.path = path
+
vstring, err := getIptablesVersionString(path)
if err != nil {
return nil, fmt.Errorf("could not get iptables version: %v", err)
@@ -112,21 +134,23 @@ func NewWithProtocol(proto Protocol) (*IPTables, error) {
if err != nil {
return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err)
}
+ ipt.v1 = v1
+ ipt.v2 = v2
+ ipt.v3 = v3
+ ipt.mode = mode
checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3)
+ ipt.hasCheck = checkPresent
+ ipt.hasWait = waitPresent
+ ipt.hasRandomFully = randomFullyPresent
- ipt := IPTables{
- path: path,
- proto: proto,
- hasCheck: checkPresent,
- hasWait: waitPresent,
- hasRandomFully: randomFullyPresent,
- v1: v1,
- v2: v2,
- v3: v3,
- mode: mode,
- }
- return &ipt, nil
+ return ipt, nil
+}
+
+// New creates a new IPTables for the given proto.
+// The proto will determine which command is used, either "iptables" or "ip6tables".
+func NewWithProtocol(proto Protocol) (*IPTables, error) {
+ return New(IPFamily(proto), Timeout(0))
}
// Proto returns the protocol used by this IPTables.
@@ -185,6 +209,14 @@ func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error {
return ipt.run(cmd...)
}
+func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error {
+ exists, err := ipt.Exists(table, chain, rulespec...)
+ if err == nil && exists {
+ err = ipt.Delete(table, chain, rulespec...)
+ }
+ return err
+}
+
// List rules in specified table/chain
func (ipt *IPTables) List(table, chain string) ([]string, error) {
args := []string{"-t", table, "-S", chain}
@@ -222,6 +254,21 @@ func (ipt *IPTables) ListChains(table string) ([]string, error) {
return chains, nil
}
+// '-S' is fine with a non-existent rule index as long as the chain exists,
+// therefore pass index 1 to reduce overhead for large chains
+func (ipt *IPTables) ChainExists(table, chain string) (bool, error) {
+ err := ipt.run("-t", table, "-S", chain, "1")
+ eerr, eok := err.(*Error)
+ switch {
+ case err == nil:
+ return true, nil
+ case eok && eerr.ExitStatus() == 1:
+ return false, nil
+ default:
+ return false, err
+ }
+}
+
// Stats lists rules including the byte and packet counts
func (ipt *IPTables) Stats(table, chain string) ([][]string, error) {
args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"}
@@ -401,6 +448,18 @@ func (ipt *IPTables) DeleteChain(table, chain string) error {
return ipt.run("-t", table, "-X", chain)
}
+func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error {
+ exists, err := ipt.ChainExists(table, chain)
+ if err != nil || !exists {
+ return err
+ }
+ err = ipt.run("-t", table, "-F", chain)
+ if err == nil {
+ err = ipt.run("-t", table, "-X", chain)
+ }
+ return err
+}
+
// ChangePolicy changes policy on chain to target
func (ipt *IPTables) ChangePolicy(table, chain, target string) error {
return ipt.run("-t", table, "-P", chain, target)
@@ -428,6 +487,9 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
args = append([]string{ipt.path}, args...)
if ipt.hasWait {
args = append(args, "--wait")
+ if ipt.timeout != 0 {
+ args = append(args, strconv.Itoa(ipt.timeout))
+ }
} else {
fmu, err := newXtablesFileLock()
if err != nil {
@@ -452,7 +514,7 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
if err := cmd.Run(); err != nil {
switch e := err.(type) {
case *exec.ExitError:
- return &Error{*e, cmd, stderr.String(), ipt.proto, nil}
+ return &Error{*e, cmd, stderr.String(), nil}
default:
return err
}
diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go
index 29dd18def..fc7db98fb 100644
--- a/vendor/github.com/coreos/go-systemd/v22/activation/files.go
+++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// +build !windows
+
// Package activation implements primitives for systemd socket activation.
package activation
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go
index 51aa11b35..d391bf00c 100644
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
+++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go
@@ -1,10 +1,10 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
+// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,5 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
-package rpcmetrics
+package activation
+
+import "os"
+
+func Files(unsetEnv bool) []*os.File {
+ return nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
index 91584a166..e843a4613 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -16,6 +16,7 @@
package dbus
import (
+ "context"
"encoding/hex"
"fmt"
"os"
@@ -112,39 +113,63 @@ type Conn struct {
// New establishes a connection to any available bus and authenticates.
// Callers should call Close() when done with the connection.
+// Deprecated: use NewWithContext instead
func New() (*Conn, error) {
- conn, err := NewSystemConnection()
+ return NewWithContext(context.Background())
+}
+
+// NewWithContext same as New with context
+func NewWithContext(ctx context.Context) (*Conn, error) {
+ conn, err := NewSystemConnectionContext(ctx)
if err != nil && os.Geteuid() == 0 {
- return NewSystemdConnection()
+ return NewSystemdConnectionContext(ctx)
}
return conn, err
}
// NewSystemConnection establishes a connection to the system bus and authenticates.
// Callers should call Close() when done with the connection
+// Deprecated: use NewSystemConnectionContext instead
func NewSystemConnection() (*Conn, error) {
+ return NewSystemConnectionContext(context.Background())
+}
+
+// NewSystemConnectionContext same as NewSystemConnection with context
+func NewSystemConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
- return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+ return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate)
})
}
// NewUserConnection establishes a connection to the session bus and
// authenticates. This can be used to connect to systemd user instances.
// Callers should call Close() when done with the connection.
+// Deprecated: use NewUserConnectionContext instead
func NewUserConnection() (*Conn, error) {
+ return NewUserConnectionContext(context.Background())
+}
+
+// NewUserConnectionContext same as NewUserConnection with context
+func NewUserConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
- return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+ return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate)
})
}
// NewSystemdConnection establishes a private, direct connection to systemd.
// This can be used for communicating with systemd without a dbus daemon.
// Callers should call Close() when done with the connection.
+// Deprecated: use NewSystemdConnectionContext instead
func NewSystemdConnection() (*Conn, error) {
+ return NewSystemdConnectionContext(context.Background())
+}
+
+// NewSystemdConnectionContext same as NewSystemdConnection with context
+func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd.
- return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) {
- return dbus.Dial("unix:path=/run/systemd/private")
+ return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) {
+ return dbus.Dial("unix:path=/run/systemd/private", opts...)
})
})
}
@@ -201,8 +226,8 @@ func (c *Conn) GetManagerProperty(prop string) (string, error) {
return variant.String(), nil
}
-func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
- conn, err := createBus()
+func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := createBus(dbus.WithContext(ctx))
if err != nil {
return nil, err
}
@@ -221,8 +246,8 @@ func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, err
return conn, nil
}
-func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
- conn, err := dbusAuthConnection(createBus)
+func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := dbusAuthConnection(ctx, createBus)
if err != nil {
return nil, err
}
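
Each connection constructor now has a ...Context variant and the old names delegate to it with context.Background(), so callers can bound the D-Bus handshake (and, via the methods.go changes below, individual calls) with a deadline. A short sketch; the five-second timeout is arbitrary:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := dbus.NewWithContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	units, err := conn.ListUnitsContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d units loaded\n", len(units))
}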
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
index e38659d7b..679f244e8 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
@@ -15,6 +15,7 @@
package dbus
import (
+ "context"
"errors"
"fmt"
"path"
@@ -38,14 +39,14 @@ func (c *Conn) jobComplete(signal *dbus.Signal) {
c.jobListener.Unlock()
}
-func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) {
if ch != nil {
c.jobListener.Lock()
defer c.jobListener.Unlock()
}
var p dbus.ObjectPath
- err := c.sysobj.Call(job, 0, args...).Store(&p)
+ err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p)
if err != nil {
return 0, err
}
@@ -90,43 +91,85 @@ func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int,
// should not be considered authoritative.
//
// If an error does occur, it will be returned to the user alongside a job ID of 0.
+// Deprecated: use StartUnitContext instead
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+ return c.StartUnitContext(context.Background(), name, mode, ch)
+}
+
+// StartUnitContext same as StartUnit with context
+func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
}
// StopUnit is similar to StartUnit but stops the specified unit rather
// than starting it.
+// Deprecated: use StopUnitContext instead
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+ return c.StopUnitContext(context.Background(), name, mode, ch)
+}
+
+// StopUnitContext same as StopUnit with context
+func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
}
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
+// Deprecated: use ReloadUnitContext instead
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+ return c.ReloadUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadUnitContext same as ReloadUnit with context
+func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
}
// RestartUnit restarts a service. If a service is restarted that isn't
// running it will be started.
+// Deprecated: use RestartUnitContext instead
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+ return c.RestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// RestartUnitContext same as RestartUnit with context
+func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
}
// TryRestartUnit is like RestartUnit, except that a service that isn't running
// is not affected by the restart.
+// Deprecated: use TryRestartUnitContext instead
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+ return c.TryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// TryRestartUnitContext same as TryRestartUnit with context
+func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}
// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart
// otherwise.
+// Deprecated: use ReloadOrRestartUnitContext instead
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+ return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrRestartUnitContext same as ReloadOrRestartUnit with context
+func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}
// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try"
// flavored restart otherwise.
+// Deprecated: use ReloadOrTryRestartUnitContext instead
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+ return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrTryRestartUnitContext same as ReloadOrTryRestartUnit with context
+func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
}
// StartTransientUnit() may be used to create and start a transient unit, which
@@ -134,28 +177,52 @@ func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string
// system is rebooted. name is the unit name including suffix, and must be
// unique. mode is the same as in StartUnit(), properties contains properties
// of the unit.
+// Deprecated: use StartTransientUnitContext instead
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
- return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+ return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch)
+}
+
+// StartTransientUnitContext same as StartTransientUnit with context
+func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
}
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
// processes are killed.
+// Deprecated: use KillUnitContext instead
func (c *Conn) KillUnit(name string, signal int32) {
- c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+ c.KillUnitContext(context.Background(), name, signal)
+}
+
+// KillUnitContext same as KillUnit with context
+func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) {
+ c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
}
// ResetFailedUnit resets the "failed" state of a specific unit.
+// Deprecated: use ResetFailedUnitContext instead
func (c *Conn) ResetFailedUnit(name string) error {
- return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+ return c.ResetFailedUnitContext(context.Background(), name)
+}
+
+// ResetFailedUnitContext same as ResetFailedUnit with context
+func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
}
// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`.
+// Deprecated: use SystemStateContext instead
func (c *Conn) SystemState() (*Property, error) {
+ return c.SystemStateContext(context.Background())
+}
+
+// SystemStateContext same as SystemState with context
+func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) {
var err error
var prop dbus.Variant
obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
- err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop)
+ err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop)
if err != nil {
return nil, err
}
@@ -164,7 +231,7 @@ func (c *Conn) SystemState() (*Property, error) {
}
// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface
-func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
+func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
var err error
var props map[string]dbus.Variant
@@ -173,7 +240,7 @@ func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[st
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
- err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
if err != nil {
return nil, err
}
@@ -187,23 +254,41 @@ func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[st
}
// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+// Deprecated: use GetUnitPropertiesContext instead
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ return c.GetUnitPropertiesContext(context.Background(), unit)
+}
+
+// GetUnitPropertiesContext same as GetUnitProperties with context
+func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
path := unitPath(unit)
- return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+ return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
}
// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
+// Deprecated: use GetUnitPathPropertiesContext instead
func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
- return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+ return c.GetUnitPathPropertiesContext(context.Background(), path)
+}
+
+// GetUnitPathPropertiesContext same as GetUnitPathProperties with context
+func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) {
+ return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
}
// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+// Deprecated: use GetAllPropertiesContext instead
func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) {
+ return c.GetAllPropertiesContext(context.Background(), unit)
+}
+
+// GetAllPropertiesContext same as GetAllProperties with context
+func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
path := unitPath(unit)
- return c.getProperties(path, "")
+ return c.getProperties(ctx, path, "")
}
-func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) {
var err error
var prop dbus.Variant
@@ -213,7 +298,7 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
- err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
if err != nil {
return nil, err
}
@@ -221,21 +306,39 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin
return &Property{Name: propertyName, Value: prop}, nil
}
+// Deprecated: use GetUnitPropertyContext instead
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
- return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+ return c.GetUnitPropertyContext(context.Background(), unit, propertyName)
+}
+
+// GetUnitPropertyContext same as GetUnitProperty with context
+func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) {
+ return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName)
}
// GetServiceProperty returns property for given service name and property name
+// Deprecated: use GetServicePropertyContext instead
func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
- return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+ return c.GetServicePropertyContext(context.Background(), service, propertyName)
+}
+
+// GetServicePropertyContext same as GetServiceProperty with context
+func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) {
+ return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName)
}
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+// Deprecated: use GetUnitTypePropertiesContext instead
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType)
+}
+
+// GetUnitTypePropertiesContext same as GetUnitTypeProperties with context
+func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) {
path := unitPath(unit)
- return c.getProperties(path, "org.freedesktop.systemd1."+unitType)
+ return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType)
}
// SetUnitProperties() may be used to modify certain unit properties at runtime.
@@ -245,12 +348,24 @@ func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]i
// case the settings only apply until the next reboot. name is the name of the unit
// to modify. properties are the settings to set, encoded as an array of property
// name and value pairs.
+// Deprecated: use SetUnitPropertiesContext instead
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
- return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+ return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...)
+}
+
+// SetUnitPropertiesContext same as SetUnitProperties with context
+func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
}
+// Deprecated: use GetUnitTypePropertyContext instead
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
- return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+ return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName)
+}
+
+// GetUnitTypePropertyContext same as GetUnitTypeProperty with context
+func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName)
}
type UnitStatus struct {
@@ -299,22 +414,40 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
// be more unit names loaded than actual units behind them.
// Also note that a unit is only loaded if it is active and/or enabled.
// Units that are both disabled and inactive will thus not be returned.
+// Deprecated: use ListUnitsContext instead
func (c *Conn) ListUnits() ([]UnitStatus, error) {
- return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+ return c.ListUnitsContext(context.Background())
+}
+
+// ListUnitsContext same as ListUnits with context
+func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
}
// ListUnitsFiltered returns an array with units filtered by state.
// It takes a list of units' statuses to filter.
+// Deprecated: use ListUnitsFilteredContext instead
func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
- return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+ return c.ListUnitsFilteredContext(context.Background(), states)
+}
+
+// ListUnitsFilteredContext same as ListUnitsFiltered with context
+func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
}
// ListUnitsByPatterns returns an array with units.
// It takes a list of units' statuses and names to filter.
// Note that units may be known by multiple names at the same time,
// and hence there might be more unit names loaded than actual units behind them.
+// Deprecated: use ListUnitsByPatternsContext instead
func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
- return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+ return c.ListUnitsByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitsByPatternsContext same as ListUnitsByPatterns with context
+func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
}
// ListUnitsByNames returns an array with units. It takes a list of units'
@@ -322,8 +455,14 @@ func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitSt
// method, this method returns statuses even for inactive or non-existing
// units. Input array should contain exact unit names, but not patterns.
// Note: Requires systemd v230 or higher
+// Deprecated: use ListUnitsByNamesContext instead
func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
- return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+ return c.ListUnitsByNamesContext(context.Background(), units)
+}
+
+// ListUnitsByNamesContext same as ListUnitsByNames with context
+func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
}
type UnitFile struct {
@@ -358,13 +497,25 @@ func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
}
// ListUnitFiles returns an array of all available units on disk.
+// Deprecated: use ListUnitFilesContext instead
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
- return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+ return c.ListUnitFilesContext(context.Background())
+}
+
+// ListUnitFilesContext same as ListUnitFiles with context
+func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
}
// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
+// Deprecated: use ListUnitFilesByPatternsContext instead
func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
- return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+ return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitFilesByPatternsContext same as ListUnitFilesByPatterns with context
+func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
}
type LinkUnitFileChange EnableUnitFileChange
@@ -383,9 +534,15 @@ type LinkUnitFileChange EnableUnitFileChange
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
+// Deprecated: use LinkUnitFilesContext instead
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// LinkUnitFilesContext same as LinkUnitFiles with context
+func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
@@ -425,11 +582,17 @@ func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUn
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
+// Deprecated: use EnableUnitFilesContext instead
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// EnableUnitFilesContext same as EnableUnitFiles with context
+func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
var carries_install_info bool
result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
if err != nil {
return false, nil, err
}
@@ -471,9 +634,15 @@ type EnableUnitFileChange struct {
// consists of structures with three strings: the type of the change (one of
// symlink or unlink), the file name of the symlink and the destination of the
// symlink.
+// Deprecated: use DisableUnitFilesContext instead
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ return c.DisableUnitFilesContext(context.Background(), files, runtime)
+}
+
+// DisableUnitFilesContext same as DisableUnitFiles with context
+func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) {
result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
@@ -512,9 +681,15 @@ type DisableUnitFileChange struct {
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
// * force flag
+// Deprecated: use MaskUnitFilesContext instead
func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ return c.MaskUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// MaskUnitFilesContext same as MaskUnitFiles with context
+func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
@@ -552,9 +727,15 @@ type MaskUnitFileChange struct {
// the usual unit search paths)
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+// Deprecated: use UnmaskUnitFilesContext instead
func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ return c.UnmaskUnitFilesContext(context.Background(), files, runtime)
+}
+
+// UnmaskUnitFilesContext same as UnmaskUnitFiles with context
+func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
@@ -586,8 +767,14 @@ type UnmaskUnitFileChange struct {
// Reload instructs systemd to scan for and reload unit files. This is
// equivalent to a 'systemctl daemon-reload'.
+// Deprecated: use ReloadContext instead
func (c *Conn) Reload() error {
- return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+ return c.ReloadContext(context.Background())
+}
+
+// ReloadContext same as Reload with context
+func (c *Conn) ReloadContext(ctx context.Context) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store()
}
func unitPath(name string) dbus.ObjectPath {
@@ -598,3 +785,48 @@ func unitPath(name string) dbus.ObjectPath {
func unitName(dpath dbus.ObjectPath) string {
return pathBusUnescape(path.Base(string(dpath)))
}
+
+// Currently queued job definition
+type JobStatus struct {
+ Id uint32 // The numeric job id
+ Unit string // The primary unit name for this job
+ JobType string // The job type as string
+ Status string // The job state as string
+ JobPath dbus.ObjectPath // The job object path
+ UnitPath dbus.ObjectPath // The unit object path
+}
+
+// ListJobs returns an array with all currently queued jobs
+// Deprecated: use ListJobsContext instead
+func (c *Conn) ListJobs() ([]JobStatus, error) {
+ return c.ListJobsContext(context.Background())
+}
+
+// ListJobsContext same as ListJobs with context
+func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) {
+ return c.listJobsInternal(ctx)
+}
+
+func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
+ result := make([][]interface{}, 0)
+ if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]JobStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ if err := dbus.Store(resultInterface, statusInterface...); err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
index a0f4837a0..ac24c7767 100644
--- a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
@@ -23,20 +23,7 @@
package journal
import (
- "bytes"
- "encoding/binary"
- "errors"
"fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "unsafe"
)
// Priority of a journal message
@@ -53,173 +40,7 @@ const (
PriDebug
)
-var (
- // This can be overridden at build-time:
- // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
- journalSocket = "/run/systemd/journal/socket"
-
- // unixConnPtr atomically holds the local unconnected Unix-domain socket.
- // Concrete safe pointer type: *net.UnixConn
- unixConnPtr unsafe.Pointer
- // onceConn ensures that unixConnPtr is initialized exactly once.
- onceConn sync.Once
-)
-
-func init() {
- onceConn.Do(initConn)
-}
-
-// Enabled checks whether the local systemd journal is available for logging.
-func Enabled() bool {
- onceConn.Do(initConn)
-
- if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
- return false
- }
-
- if _, err := net.Dial("unixgram", journalSocket); err != nil {
- return false
- }
-
- return true
-}
-
-// Send a message to the local systemd journal. vars is a map of journald
-// fields to values. Fields must be composed of uppercase letters, numbers,
-// and underscores, but must not start with an underscore. Within these
-// restrictions, any arbitrary field name may be used. Some names have special
-// significance: see the journalctl documentation
-// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
-// for more details. vars may be nil.
-func Send(message string, priority Priority, vars map[string]string) error {
- conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
- if conn == nil {
- return errors.New("could not initialize socket to journald")
- }
-
- socketAddr := &net.UnixAddr{
- Name: journalSocket,
- Net: "unixgram",
- }
-
- data := new(bytes.Buffer)
- appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
- appendVariable(data, "MESSAGE", message)
- for k, v := range vars {
- appendVariable(data, k, v)
- }
-
- _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
- if err == nil {
- return nil
- }
- if !isSocketSpaceError(err) {
- return err
- }
-
- // Large log entry, send it via tempfile and ancillary-fd.
- file, err := tempFd()
- if err != nil {
- return err
- }
- defer file.Close()
- _, err = io.Copy(file, data)
- if err != nil {
- return err
- }
- rights := syscall.UnixRights(int(file.Fd()))
- _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
- if err != nil {
- return err
- }
-
- return nil
-}
-
// Print prints a message to the local systemd journal using Send().
func Print(priority Priority, format string, a ...interface{}) error {
return Send(fmt.Sprintf(format, a...), priority, nil)
}
-
-func appendVariable(w io.Writer, name, value string) {
- if err := validVarName(name); err != nil {
- fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
- }
- if strings.ContainsRune(value, '\n') {
- /* When the value contains a newline, we write:
- * - the variable name, followed by a newline
- * - the size (in 64bit little endian format)
- * - the data, followed by a newline
- */
- fmt.Fprintln(w, name)
- binary.Write(w, binary.LittleEndian, uint64(len(value)))
- fmt.Fprintln(w, value)
- } else {
- /* just write the variable and value all on one line */
- fmt.Fprintf(w, "%s=%s\n", name, value)
- }
-}
-
-// validVarName validates a variable name to make sure journald will accept it.
-// The variable name must be in uppercase and consist only of characters,
-// numbers and underscores, and may not begin with an underscore:
-// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
-func validVarName(name string) error {
- if name == "" {
- return errors.New("Empty variable name")
- } else if name[0] == '_' {
- return errors.New("Variable name begins with an underscore")
- }
-
- for _, c := range name {
- if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
- return errors.New("Variable name contains invalid characters")
- }
- }
- return nil
-}
-
-// isSocketSpaceError checks whether the error is signaling
-// an "overlarge message" condition.
-func isSocketSpaceError(err error) bool {
- opErr, ok := err.(*net.OpError)
- if !ok || opErr == nil {
- return false
- }
-
- sysErr, ok := opErr.Err.(*os.SyscallError)
- if !ok || sysErr == nil {
- return false
- }
-
- return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
-}
-
-// tempFd creates a temporary, unlinked file under `/dev/shm`.
-func tempFd() (*os.File, error) {
- file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
- if err != nil {
- return nil, err
- }
- err = syscall.Unlink(file.Name())
- if err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// initConn initializes the global `unixConnPtr` socket.
-// It is meant to be called exactly once, at program startup.
-func initConn() {
- autobind, err := net.ResolveUnixAddr("unixgram", "")
- if err != nil {
- return
- }
-
- sock, err := net.ListenUnixgram("unixgram", autobind)
- if err != nil {
- return
- }
-
- atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
-}
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
new file mode 100644
index 000000000..7233ecfc7
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -0,0 +1,208 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ // This can be overridden at build-time:
+ // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+ journalSocket = "/run/systemd/journal/socket"
+
+ // unixConnPtr atomically holds the local unconnected Unix-domain socket.
+ // Concrete safe pointer type: *net.UnixConn
+ unixConnPtr unsafe.Pointer
+ // onceConn ensures that unixConnPtr is initialized exactly once.
+ onceConn sync.Once
+)
+
+func init() {
+ onceConn.Do(initConn)
+}
+
+// Enabled checks whether the local systemd journal is available for logging.
+func Enabled() bool {
+ onceConn.Do(initConn)
+
+ if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
+ return false
+ }
+
+ if _, err := net.Dial("unixgram", journalSocket); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+ conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+ if conn == nil {
+ return errors.New("could not initialize socket to journald")
+ }
+
+ socketAddr := &net.UnixAddr{
+ Name: journalSocket,
+ Net: "unixgram",
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+ if err == nil {
+ return nil
+ }
+ if !isSocketSpaceError(err) {
+ return err
+ }
+
+ // Large log entry, send it via tempfile and ancillary-fd.
+ file, err := tempFd()
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return err
+ }
+ rights := syscall.UnixRights(int(file.Fd()))
+ _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func appendVariable(w io.Writer, name, value string) {
+ if err := validVarName(name); err != nil {
+ fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+ * - the size (in 64bit little endian format)
+ * - the data, followed by a newline
+ */
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+ fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must be in uppercase and consist only of characters,
+// numbers and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+ if name == "" {
+ return errors.New("Empty variable name")
+ } else if name[0] == '_' {
+ return errors.New("Variable name begins with an underscore")
+ }
+
+ for _, c := range name {
+ if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+ return errors.New("Variable name contains invalid characters")
+ }
+ }
+ return nil
+}
+
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok || opErr == nil {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(*os.SyscallError)
+ if !ok || sysErr == nil {
+ return false
+ }
+
+ return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
+}
+
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+ err = syscall.Unlink(file.Name())
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// initConn initializes the global `unixConnPtr` socket.
+// It is meant to be called exactly once, at program startup.
+func initConn() {
+ autobind, err := net.ResolveUnixAddr("unixgram", "")
+ if err != nil {
+ return
+ }
+
+ sock, err := net.ListenUnixgram("unixgram", autobind)
+ if err != nil {
+ return
+ }
+
+ atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
new file mode 100644
index 000000000..677aca68e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -0,0 +1,35 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "errors"
+)
+
+func Enabled() bool {
+ return false
+}
+
+func Send(message string, priority Priority, vars map[string]string) error {
+ return errors.New("could not initialize socket to journald")
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go
index 7f840def8..c61a2025f 100644
--- a/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go
+++ b/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go
@@ -300,6 +300,24 @@ package sdjournal
// return sd_journal_get_catalog(j, ret);
// }
//
+// int
+// my_sd_id128_get_boot(void *f, sd_id128_t *boot_id)
+// {
+// int(*sd_id128_get_boot)(sd_id128_t *);
+//
+// sd_id128_get_boot = f;
+// return sd_id128_get_boot(boot_id);
+// }
+//
+// char *
+// my_sd_id128_to_string(void *f, sd_id128_t boot_id, char s[_SD_ARRAY_STATIC SD_ID128_STRING_MAX])
+// {
+// char *(*sd_id128_to_string)(sd_id128_t, char *);
+//
+// sd_id128_to_string = f;
+// return sd_id128_to_string(boot_id, s);
+// }
+//
import "C"
import (
"bytes"
@@ -928,7 +946,7 @@ func (j *Journal) SeekHead() error {
}
// SeekTail may be used to seek to the end of the journal, i.e. the most recent
-// available entry. This call must be followed by a call to Next before any
+// available entry. This call must be followed by a call to Previous before any
// call to Get* will return data about the last element.
func (j *Journal) SeekTail() error {
sd_journal_seek_tail, err := getFunction("sd_journal_seek_tail")
@@ -1118,3 +1136,33 @@ func (j *Journal) GetCatalog() (string, error) {
return catalog, nil
}
+
+// GetBootID get systemd boot id
+func (j *Journal) GetBootID() (string, error) {
+ sd_id128_get_boot, err := getFunction("sd_id128_get_boot")
+ if err != nil {
+ return "", err
+ }
+
+ var boot_id C.sd_id128_t
+ r := C.my_sd_id128_get_boot(sd_id128_get_boot, &boot_id)
+ if r < 0 {
+ return "", fmt.Errorf("failed to get boot id: %s", syscall.Errno(-r).Error())
+ }
+
+ sd_id128_to_string, err := getFunction("sd_id128_to_string")
+ if err != nil {
+ return "", err
+ }
+
+ c := (*C.char)(C.malloc(33))
+ defer C.free(unsafe.Pointer(c))
+ C.my_sd_id128_to_string(sd_id128_to_string, boot_id, c)
+
+ bootID := C.GoString(c)
+ if len(bootID) <= 0 {
+ return "", fmt.Errorf("get boot id %s is not valid", bootID)
+ }
+
+ return bootID, nil
+}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index d9c1d37db..b38340126 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -198,6 +198,11 @@ func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugi
return initCNI(nil, "", defaultNetName, confDir, binDirs...)
}
+// InitCNIWithCache works like InitCNI except that it takes the cni cache directory as third param.
+func InitCNIWithCache(defaultNetName, confDir, cacheDir string, binDirs ...string) (CNIPlugin, error) {
+ return initCNI(nil, cacheDir, defaultNetName, confDir, binDirs...)
+}
+
// Internal function to allow faking out exec functions for testing
func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) {
if confDir == "" {
@@ -208,7 +213,7 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin
}
plugin := &cniNetworkPlugin{
- cniConfig: libcni.NewCNIConfig(binDirs, exec),
+ cniConfig: libcni.NewCNIConfigWithCacheDir(binDirs, cacheDir, exec),
defaultNetName: netName{
name: defaultNetName,
// If defaultNetName is not assigned in initialization,
@@ -275,13 +280,19 @@ func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
- logrus.Errorf("Error loading CNI config list file %s: %v", confFile, err)
+ // do not log ENOENT errors
+ if !os.IsNotExist(err) {
+ logrus.Errorf("Error loading CNI config list file %s: %v", confFile, err)
+ }
continue
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
- logrus.Errorf("Error loading CNI config file %s: %v", confFile, err)
+ // do not log ENOENT errors
+ if !os.IsNotExist(err) {
+ logrus.Errorf("Error loading CNI config file %s: %v", confFile, err)
+ }
continue
}
if conf.Network.Type == "" {
@@ -468,7 +479,7 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache
}
}
- rt, err := buildCNIRuntimeConf(plugin.cacheDir, podNetwork, ifName, podNetwork.RuntimeConfig[network.Name])
+ rt, err := buildCNIRuntimeConf(podNetwork, ifName, podNetwork.RuntimeConfig[network.Name])
if err != nil {
logrus.Errorf("error building CNI runtime config: %v", err)
return err
@@ -489,8 +500,15 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache
if cniNet == nil {
cniNet, err = plugin.getNetwork(network.Name)
if err != nil {
- logrus.Errorf(err.Error())
- return err
+ // try to load the networks again
+ if err2 := plugin.syncNetworkConfig(); err2 != nil {
+ logrus.Error(err2)
+ return err
+ }
+ cniNet, err = plugin.getNetwork(network.Name)
+ if err != nil {
+ return err
+ }
}
}
@@ -775,13 +793,12 @@ func (network *cniNetwork) deleteFromNetwork(ctx context.Context, rt *libcni.Run
return nil
}
-func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (*libcni.RuntimeConf, error) {
+func buildCNIRuntimeConf(podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (*libcni.RuntimeConf, error) {
logrus.Infof("Got pod network %+v", podNetwork)
rt := &libcni.RuntimeConf{
ContainerID: podNetwork.ID,
NetNS: podNetwork.NetNS,
- CacheDir: cacheDir,
IfName: ifName,
Args: [][2]string{
{"IgnoreUnknown", "1"},
diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore
index fa81aa93a..35d9351d3 100644
--- a/vendor/github.com/nxadm/tail/.gitignore
+++ b/vendor/github.com/nxadm/tail/.gitignore
@@ -1,2 +1,3 @@
.idea/
-.test/ \ No newline at end of file
+.test/
+examples/_* \ No newline at end of file
diff --git a/vendor/github.com/nxadm/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml
deleted file mode 100644
index 95dd3bd78..000000000
--- a/vendor/github.com/nxadm/tail/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-language: go
-
-script:
- - go test -race -v ./...
-
-go:
- - "1.9"
- - "1.10"
- - "1.11"
- - "1.12"
- - "1.13"
- - tip
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md
index ef1b5fbed..224e54b44 100644
--- a/vendor/github.com/nxadm/tail/CHANGES.md
+++ b/vendor/github.com/nxadm/tail/CHANGES.md
@@ -1,4 +1,14 @@
-# Version v1.4.4
+# Version v1.4.7-v1.4.8
+* Documentation updates.
+* Small linter cleanups.
+* Added example in test.
+
+# Version v1.4.6
+
+* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky) for issue #18.
+* Add example directories with example and tests for issues.
+
+# Version v1.4.4-v1.4.5
* Fix of checksum problem because of forced tag. No changes to the code.
diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md
index dbb6c1727..f47939c74 100644
--- a/vendor/github.com/nxadm/tail/README.md
+++ b/vendor/github.com/nxadm/tail/README.md
@@ -1,36 +1,44 @@
-[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail)
+![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail)
-This is repo is forked from the dormant upstream repo at
-[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go
-modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is
-the oldest compiler release supported.
+# tail functionality in Go
-# Go package for tail-ing files
+nxadm/tail provides a Go library that emulates the features of the BSD `tail`
+program. The library comes with full support for truncation/move detection as
+it is designed to work with log rotation tools. The library works on all
+operating systems supported by Go, including POSIX systems like Linux and
+*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
-A Go package striving to emulate the features of the BSD `tail` program.
+A simple example:
```Go
-t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
+// Create a tail
+t, err := tail.TailFile(
+ "/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true})
if err != nil {
panic(err)
}
+// Print the text of each received line
for line := range t.Lines {
fmt.Println(line.Text)
}
```
-See [API documentation](http://godoc.org/github.com/nxadm/tail).
-
-## Log rotation
-
-Tail comes with full support for truncation/move detection as it is
-designed to work with log rotation tools.
+See [API documentation](https://pkg.go.dev/github.com/nxadm/tail).
## Installing
go get github.com/nxadm/tail/...
-## Windows support
+## History
+
+This project is an active, drop-in replacement for the
+[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at
+[hpcloud](https://github.com/hpcloud/tail). Next to
+[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6),
+nxadm/tail continues the development by keeping up to date with the Go toolchain
+(e.g. go modules) and dependencies, completing the documentation, adding features
+and fixing bugs.
-This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support.
+## Examples
+Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples). \ No newline at end of file
diff --git a/vendor/github.com/nxadm/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml
deleted file mode 100644
index e149bc62d..000000000
--- a/vendor/github.com/nxadm/tail/appveyor.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-version: 0.{build}
-skip_tags: true
-cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
-build_script:
-- SET GOPATH=c:\workspace
-- go test -v -race ./...
-test: off
-clone_folder: c:\workspace\src\github.com\nxadm\tail
-branches:
- only:
- - master
diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod
index fb10d42af..5de9a6061 100644
--- a/vendor/github.com/nxadm/tail/go.mod
+++ b/vendor/github.com/nxadm/tail/go.mod
@@ -3,7 +3,6 @@ module github.com/nxadm/tail
go 1.13
require (
- github.com/fsnotify/fsnotify v1.4.7
- golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect
+ github.com/fsnotify/fsnotify v1.4.9
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
)
diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum
index b391f1904..3485daedb 100644
--- a/vendor/github.com/nxadm/tail/go.sum
+++ b/vendor/github.com/nxadm/tail/go.sum
@@ -1,6 +1,6 @@
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go
index 58d3c4b95..37ea4411e 100644
--- a/vendor/github.com/nxadm/tail/tail.go
+++ b/vendor/github.com/nxadm/tail/tail.go
@@ -1,6 +1,12 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+//nxadm/tail provides a Go library that emulates the features of the BSD `tail`
+//program. The library comes with full support for truncation/move detection as
+//it is designed to work with log rotation tools. The library works on all
+//operating systems supported by Go, including POSIX systems like Linux and
+//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
package tail
import (
@@ -22,26 +28,31 @@ import (
)
var (
+ // ErrStop is returned when the tail of a file has been marked to be stopped.
ErrStop = errors.New("tail should now stop")
)
type Line struct {
- Text string
- Num int
- SeekInfo SeekInfo
- Time time.Time
- Err error // Error from tail
+ Text string // The contents of the file
+ Num int // The line number
+ SeekInfo SeekInfo // SeekInfo
+ Time time.Time // Present time
+ Err error // Error from tail
}
-// NewLine returns a Line with present time.
+// Deprecated: this function is no longer used internally and it has little of no
+// use in the API. As such, it will be removed from the API in a future major
+// release.
+//
+// NewLine returns a * pointer to a Line struct.
func NewLine(text string, lineNum int) *Line {
return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
}
-// SeekInfo represents arguments to `io.Seek`
+// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
type SeekInfo struct {
Offset int64
- Whence int // io.Seek*
+ Whence int
}
type logger interface {
@@ -59,26 +70,28 @@ type logger interface {
// Config is used to specify how a file must be tailed.
type Config struct {
// File-specifc
- Location *SeekInfo // Seek to this location before tailing
- ReOpen bool // Reopen recreated files (tail -F)
- MustExist bool // Fail early if the file does not exist
- Poll bool // Poll for file changes instead of using inotify
- Pipe bool // Is a named pipe (mkfifo)
- RateLimiter *ratelimiter.LeakyBucket
+ Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file
+ ReOpen bool // Reopen recreated files (tail -F)
+ MustExist bool // Fail early if the file does not exist
+ Poll bool // Poll for file changes instead of using the default inotify
+ Pipe bool // The file is a named pipe (mkfifo)
// Generic IO
Follow bool // Continue looking for new lines (tail -f)
MaxLineSize int // If non-zero, split longer lines into multiple lines
- // Logger, when nil, is set to tail.DefaultLogger
- // To disable logging: set field to tail.DiscardingLogger
+ // Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
+ RateLimiter *ratelimiter.LeakyBucket
+
+ // Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
+ // To disable logging, set it to tail.DiscardingLogger
Logger logger
}
type Tail struct {
- Filename string
- Lines chan *Line
- Config
+ Filename string // The filename
+ Lines chan *Line // A consumable channel of *Line
+ Config // Tail.Configuration
file *os.File
reader *bufio.Reader
@@ -93,16 +106,17 @@ type Tail struct {
}
var (
- // DefaultLogger is used when Config.Logger == nil
+ // DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// DiscardingLogger can be used to disable logging output
DiscardingLogger = log.New(ioutil.Discard, "", 0)
)
-// TailFile begins tailing the file. Output stream is made available
-// via the `Tail.Lines` channel. To handle errors during tailing,
-// invoke the `Wait` or `Err` method after finishing reading from the
-// `Lines` channel.
+// TailFile begins tailing the file. And returns a pointer to a Tail struct
+// and an error. An output stream is made available via the Tail.Lines
+// channel (e.g. to be looped and printed). To handle errors during tailing,
+// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
+// method on the returned *Tail.
func TailFile(filename string, config Config) (*Tail, error) {
if config.ReOpen && !config.Follow {
util.Fatal("cannot set ReOpen without Follow.")
@@ -138,10 +152,9 @@ func TailFile(filename string, config Config) (*Tail, error) {
return t, nil
}
-// Tell returns the file's current position, like stdio's ftell().
-// But this value is not very accurate.
-// One line from the chan(tail.Lines) may have been read,
-// so it may have lost one line.
+// Tell returns the file's current position, like stdio's ftell() and an error.
+// Beware that this value may not be completely accurate because one line from
+// the chan(tail.Lines) may have been read already.
func (tail *Tail) Tell() (offset int64, err error) {
if tail.file == nil {
return
@@ -167,7 +180,8 @@ func (tail *Tail) Stop() error {
return tail.Wait()
}
-// StopAtEOF stops tailing as soon as the end of the file is reached.
+// StopAtEOF stops tailing as soon as the end of the file is reached. The function
+// returns an error,
func (tail *Tail) StopAtEOF() error {
tail.Kill(errStopAtEOF)
return tail.Wait()
@@ -435,6 +449,7 @@ func (tail *Tail) sendLine(line string) bool {
// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. Linux kernel may not
// automatically remove inotify watches after the process exits.
+// If you plan to re-read a file, don't call Cleanup in between.
func (tail *Tail) Cleanup() {
watch.Cleanup(tail.Filename)
}
diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go
index 1b94520ec..23e071dea 100644
--- a/vendor/github.com/nxadm/tail/tail_posix.go
+++ b/vendor/github.com/nxadm/tail/tail_posix.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build !windows
package tail
@@ -6,6 +7,11 @@ import (
"os"
)
+// Deprecated: this function is only useful internally and, as such,
+// it will be removed from the API in a future major release.
+//
+// OpenFile proxies a os.Open call for a file so it can be correctly tailed
+// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
return os.Open(name)
}
diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go
index 4aaceea28..da0d2f39c 100644
--- a/vendor/github.com/nxadm/tail/tail_windows.go
+++ b/vendor/github.com/nxadm/tail/tail_windows.go
@@ -1,12 +1,19 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows
package tail
import (
- "github.com/nxadm/tail/winfile"
"os"
+
+ "github.com/nxadm/tail/winfile"
)
+// Deprecated: this function is only useful internally and, as such,
+// it will be removed from the API in a future major release.
+//
+// OpenFile proxies a os.Open call for a file so it can be correctly tailed
+// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
return winfile.OpenFile(name, os.O_RDONLY, 0)
}
diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go
index 2ba0ed71c..b64caa212 100644
--- a/vendor/github.com/nxadm/tail/util/util.go
+++ b/vendor/github.com/nxadm/tail/util/util.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go
index f80aead9a..5b65f42ae 100644
--- a/vendor/github.com/nxadm/tail/watch/filechanges.go
+++ b/vendor/github.com/nxadm/tail/watch/filechanges.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
package watch
type FileChanges struct {
diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go
index 439921810..cbd11ad8d 100644
--- a/vendor/github.com/nxadm/tail/watch/inotify.go
+++ b/vendor/github.com/nxadm/tail/watch/inotify.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
index a94bcd4cb..cb9572a03 100644
--- a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
+++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go
index fb1706908..74e10aa42 100644
--- a/vendor/github.com/nxadm/tail/watch/polling.go
+++ b/vendor/github.com/nxadm/tail/watch/polling.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go
index 2e1783ef0..2b5112805 100644
--- a/vendor/github.com/nxadm/tail/watch/watch.go
+++ b/vendor/github.com/nxadm/tail/watch/watch.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go
index aa7e7bc5d..4562ac7c2 100644
--- a/vendor/github.com/nxadm/tail/winfile/winfile.go
+++ b/vendor/github.com/nxadm/tail/winfile/winfile.go
@@ -1,3 +1,4 @@
+// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows
package winfile
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 8b2883f97..ea0966d5b 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,8 +1,8 @@
language: go
go:
- - 1.14.x
- - 1.15.x
- tip
+ - 1.16.x
+ - 1.15.x
cache:
directories:
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index bf51fe9cd..686189dd6 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
## 1.15.0
### Features
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index 64e85eee0..5d56adba0 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,6 +1,7 @@
![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
+[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 8c177811e..8a2479824 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.15.0"
+const VERSION = "1.15.1"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 655060cf7..738a2f107 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -1,11 +1,10 @@
module github.com/onsi/ginkgo
+go 1.15
+
require (
- github.com/fsnotify/fsnotify v1.4.9 // indirect
- github.com/nxadm/tail v1.4.4
+ github.com/nxadm/tail v1.4.8
github.com/onsi/gomega v1.10.1
golang.org/x/sys v0.0.0-20210112080510-489259a85091
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e
)
-
-go 1.13
diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum
index 56a493f9d..8fdaac400 100644
--- a/vendor/github.com/onsi/ginkgo/go.sum
+++ b/vendor/github.com/onsi/ginkgo/go.sum
@@ -14,8 +14,9 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
index 963caaaff..01ddca6e1 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
@@ -33,17 +33,12 @@ type JUnitTestSuite struct {
type JUnitTestCase struct {
Name string `xml:"name,attr"`
ClassName string `xml:"classname,attr"`
- PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"`
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
Time float64 `xml:"time,attr"`
SystemOut string `xml:"system-out,omitempty"`
}
-type JUnitPassedMessage struct {
- Message string `xml:",chardata"`
-}
-
type JUnitFailureMessage struct {
Type string `xml:"type,attr"`
Message string `xml:",chardata"`
@@ -114,9 +109,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
ClassName: reporter.testSuiteName,
}
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
- testCase.PassedMessage = &JUnitPassedMessage{
- Message: specSummary.CapturedOutput,
- }
+ testCase.SystemOut = specSummary.CapturedOutput
}
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
testCase.FailureMessage = &JUnitFailureMessage{
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
index 348e3014c..6543dc553 100644
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ b/vendor/github.com/onsi/gomega/.travis.yml
@@ -1,20 +1,18 @@
language: go
arch:
- - amd64
- - ppc64le
+ - amd64
+ - ppc64le
go:
- - 1.14.x
- - 1.15.x
- gotip
+ - 1.16.x
+ - 1.15.x
env:
- GO111MODULE=on
-install:
- - go get -v ./...
- - go build ./...
- - go get github.com/onsi/ginkgo
- - go install github.com/onsi/ginkgo/ginkgo
+install: skip
-script: make test
+script:
+ - go mod tidy && git diff --exit-code go.mod go.sum
+ - make test
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 16095fa3c..33bc75509 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,16 @@
+## 1.11.0
+
+### Features
+- feature: add index to gstruct element func (#419) [334e00d]
+- feat(gexec) Add CompileTest functions. Close #410 (#411) [47c613f]
+
+### Fixes
+- Check more carefully for nils in WithTransform (#423) [3c60a15]
+- fix: typo in Makefile [b82522a]
+- Allow WithTransform function to accept a nil value (#422) [b75d2f2]
+- fix: print value type for interface{} containers (#409) [f08e2dc]
+- fix(BeElementOf): consistently flatten expected values [1fa9468]
+
## 1.10.5
### Fixes
diff --git a/vendor/github.com/onsi/gomega/Dockerfile b/vendor/github.com/onsi/gomega/Dockerfile
new file mode 100644
index 000000000..11c7e63e7
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/Dockerfile
@@ -0,0 +1 @@
+FROM golang:1.15
diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile
index c92cd56e3..1c6d107e1 100644
--- a/vendor/github.com/onsi/gomega/Makefile
+++ b/vendor/github.com/onsi/gomega/Makefile
@@ -1,6 +1,33 @@
-test:
- [ -z "`gofmt -s -w -l -e .`" ]
- go vet
- ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
+###### Help ###################################################################
-.PHONY: test
+.DEFAULT_GOAL = help
+
+.PHONY: help
+
+help: ## list Makefile targets
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+###### Targets ################################################################
+
+test: version download fmt vet ginkgo ## Runs all build, static analysis, and test steps
+
+download: ## Download dependencies
+ go mod download
+
+vet: ## Run static code analysis
+ go vet ./...
+
+ginkgo: ## Run tests using Ginkgo
+ go run github.com/onsi/ginkgo/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
+
+fmt: ## Checks that the code is formatted correcty
+ @@if [ -n "$$(gofmt -s -e -l -d .)" ]; then \
+ echo "gofmt check failed: run 'gofmt -s -e -l -w .'"; \
+ exit 1; \
+ fi
+
+docker_test: ## Run tests in a container via docker-compose
+ docker-compose build test && docker-compose run --rm test make test
+
+version: ## Display the version of Go
+ @@go version
diff --git a/vendor/github.com/onsi/gomega/docker-compose.yaml b/vendor/github.com/onsi/gomega/docker-compose.yaml
new file mode 100644
index 000000000..f37496143
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/docker-compose.yaml
@@ -0,0 +1,10 @@
+version: '3.0'
+
+services:
+ test:
+ build:
+ dockerfile: Dockerfile
+ context: .
+ working_dir: /app
+ volumes:
+ - ${PWD}:/app
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index e59d7d75b..4f7462ab1 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -7,6 +7,7 @@ Gomega's format package pretty-prints objects. It explores input objects recurs
package format
import (
+ "context"
"fmt"
"reflect"
"strconv"
@@ -44,16 +45,7 @@ var TruncateThreshold uint = 50
// after the first diff location in a truncated string assertion error message.
var CharactersAroundMismatchToInclude uint = 5
-// Ctx interface defined here to keep backwards compatibility with go < 1.7
-// It matches the context.Context interface
-type Ctx interface {
- Deadline() (deadline time.Time, ok bool)
- Done() <-chan struct{}
- Err() error
- Value(key interface{}) interface{}
-}
-
-var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
var timeType = reflect.TypeOf(time.Time{})
//The default indentation string emitted by the format package
@@ -181,7 +173,7 @@ Set PrintContextObjects to true to print the content of objects implementing con
func Object(object interface{}, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
- return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+ return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation))
}
/*
@@ -201,25 +193,20 @@ func IndentString(s string, indentation uint) string {
return result
}
-func formatType(object interface{}) string {
- t := reflect.TypeOf(object)
- if t == nil {
+func formatType(v reflect.Value) string {
+ switch v.Kind() {
+ case reflect.Invalid:
return "nil"
- }
- switch t.Kind() {
case reflect.Chan:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
case reflect.Ptr:
- return fmt.Sprintf("%T | %p", object, object)
+ return fmt.Sprintf("%s | 0x%x", v.Type(), v.Pointer())
case reflect.Slice:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
case reflect.Map:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d", object, v.Len())
+ return fmt.Sprintf("%s | len:%d", v.Type(), v.Len())
default:
- return fmt.Sprintf("%T", object)
+ return fmt.Sprintf("%s", v.Type())
}
}
@@ -284,7 +271,7 @@ func formatValue(value reflect.Value, indentation uint) string {
}
return formatStruct(value, indentation)
case reflect.Interface:
- return formatValue(value.Elem(), indentation)
+ return formatInterface(value, indentation)
default:
if value.CanInterface() {
return fmt.Sprintf("%#v", value.Interface())
@@ -379,6 +366,10 @@ func formatStruct(v reflect.Value, indentation uint) string {
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
}
+func formatInterface(v reflect.Value, indentation uint) string {
+ return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation))
+}
+
func isNilValue(a reflect.Value) bool {
switch a.Kind() {
case reflect.Invalid:
diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go
index 741d845f4..c7aba62b7 100644
--- a/vendor/github.com/onsi/gomega/gexec/build.go
+++ b/vendor/github.com/onsi/gomega/gexec/build.go
@@ -3,6 +3,8 @@
package gexec
import (
+ "crypto/md5"
+ "encoding/hex"
"errors"
"fmt"
"go/build"
@@ -46,6 +48,135 @@ func BuildIn(gopath string, packagePath string, args ...string) (compiledPath st
return doBuild(gopath, packagePath, nil, args...)
}
+func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) {
+ executable, err := newExecutablePath(gopath, packagePath)
+ if err != nil {
+ return "", err
+ }
+
+ cmdArgs := append([]string{"build"}, args...)
+ cmdArgs = append(cmdArgs, "-o", executable, packagePath)
+
+ build := exec.Command("go", cmdArgs...)
+ build.Env = replaceGoPath(os.Environ(), gopath)
+ build.Env = append(build.Env, env...)
+
+ output, err := build.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
+ }
+
+ return executable, nil
+}
+
+/*
+CompileTest uses go test to compile the test package at packagePath. The resulting binary is saved off in a temporary directory.
+A path pointing to this binary is returned.
+
+CompileTest uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+,
+it will use the default GOPATH instead. It passes the variadic args on to `go test`.
+*/
+func CompileTest(packagePath string, args ...string) (compiledPath string, err error) {
+ return doCompileTest(build.Default.GOPATH, packagePath, nil, args...)
+}
+
+/*
+GetAndCompileTest is identical to CompileTest but `go get` the package before compiling tests.
+*/
+func GetAndCompileTest(packagePath string, args ...string) (compiledPath string, err error) {
+ if err := getForTest(build.Default.GOPATH, packagePath, nil); err != nil {
+ return "", err
+ }
+
+ return doCompileTest(build.Default.GOPATH, packagePath, nil, args...)
+}
+
+/*
+CompileTestWithEnvironment is identical to CompileTest but allows you to specify env vars to be set at build time.
+*/
+func CompileTestWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) {
+ return doCompileTest(build.Default.GOPATH, packagePath, env, args...)
+}
+
+/*
+GetAndCompileTestWithEnvironment is identical to GetAndCompileTest but allows you to specify env vars to be set at build time.
+*/
+func GetAndCompileTestWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) {
+ if err := getForTest(build.Default.GOPATH, packagePath, env); err != nil {
+ return "", err
+ }
+
+ return doCompileTest(build.Default.GOPATH, packagePath, env, args...)
+}
+
+/*
+CompileTestIn is identical to CompileTest but allows you to specify a custom $GOPATH (the first argument).
+*/
+func CompileTestIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
+ return doCompileTest(gopath, packagePath, nil, args...)
+}
+
+/*
+GetAndCompileTestIn is identical to GetAndCompileTest but allows you to specify a custom $GOPATH (the first argument).
+*/
+func GetAndCompileTestIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
+ if err := getForTest(gopath, packagePath, nil); err != nil {
+ return "", err
+ }
+
+ return doCompileTest(gopath, packagePath, nil, args...)
+}
+
+func isLocalPackage(packagePath string) bool {
+ return strings.HasPrefix(packagePath, ".")
+}
+
+func getForTest(gopath, packagePath string, env []string) error {
+ if isLocalPackage(packagePath) {
+ return nil
+ }
+
+ return doGet(gopath, packagePath, env, "-t")
+}
+
+func doGet(gopath, packagePath string, env []string, args ...string) error {
+ args = append(args, packagePath)
+ args = append([]string{"get"}, args...)
+
+ goGet := exec.Command("go", args...)
+ goGet.Dir = gopath
+ goGet.Env = replaceGoPath(os.Environ(), gopath)
+ goGet.Env = append(goGet.Env, env...)
+
+ output, err := goGet.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("Failed to get %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
+ }
+
+ return nil
+}
+
+func doCompileTest(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) {
+ executable, err := newExecutablePath(gopath, packagePath, ".test")
+ if err != nil {
+ return "", err
+ }
+
+ cmdArgs := append([]string{"test", "-c"}, args...)
+ cmdArgs = append(cmdArgs, "-o", executable, packagePath)
+
+ build := exec.Command("go", cmdArgs...)
+ build.Env = replaceGoPath(os.Environ(), gopath)
+ build.Env = append(build.Env, env...)
+
+ output, err := build.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
+ }
+
+ return executable, nil
+}
+
func replaceGoPath(environ []string, newGoPath string) []string {
newEnviron := []string{}
for _, v := range environ {
@@ -56,7 +187,7 @@ func replaceGoPath(environ []string, newGoPath string) []string {
return append(newEnviron, "GOPATH="+newGoPath)
}
-func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) {
+func newExecutablePath(gopath, packagePath string, suffixes ...string) (string, error) {
tmpDir, err := temporaryDirectory()
if err != nil {
return "", err
@@ -66,23 +197,14 @@ func doBuild(gopath, packagePath string, env []string, args ...string) (compiled
return "", errors.New("$GOPATH not provided when building " + packagePath)
}
- executable := filepath.Join(tmpDir, path.Base(packagePath))
+ hash := md5.Sum([]byte(packagePath))
+ filename := fmt.Sprintf("%s-%x%s", path.Base(packagePath), hex.EncodeToString(hash[:]), strings.Join(suffixes, ""))
+ executable := filepath.Join(tmpDir, filename)
+
if runtime.GOOS == "windows" {
executable += ".exe"
}
- cmdArgs := append([]string{"build"}, args...)
- cmdArgs = append(cmdArgs, "-o", executable, packagePath)
-
- build := exec.Command("go", cmdArgs...)
- build.Env = replaceGoPath(os.Environ(), gopath)
- build.Env = append(build.Env, env...)
-
- output, err := build.CombinedOutput()
- if err != nil {
- return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
- }
-
return executable, nil
}
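
A hedged usage sketch of the CompileTest helpers added in the gexec diff above: they shell out to `go test -c` and return the path of the compiled test binary in a temporary directory. The package path and flags below are illustrative only, not taken from this change.

```go
package example_test

import (
	"os/exec"
	"testing"

	"github.com/onsi/gomega/gexec"
)

func TestRunCompiledSuite(t *testing.T) {
	// Compile the test package into a standalone binary; the import path
	// is a placeholder for a real package in the module under test.
	bin, err := gexec.CompileTest("github.com/example/project/pkg", "-race")
	if err != nil {
		t.Fatal(err)
	}
	// Remove the temporary build artifacts created by gexec.
	defer gexec.CleanupBuildArtifacts()

	// The compiled binary accepts the usual "go test" binary flags.
	out, err := exec.Command(bin, "-test.v").CombinedOutput()
	if err != nil {
		t.Fatalf("test binary failed: %v\n%s", err, out)
	}
	t.Logf("%s", out)
}
```
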
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 6f853a579..8731e6ac2 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -3,8 +3,8 @@ module github.com/onsi/gomega
go 1.14
require (
- github.com/golang/protobuf v1.4.2
+ github.com/golang/protobuf v1.4.3
github.com/onsi/ginkgo v1.12.1
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
- gopkg.in/yaml.v2 v2.3.0
+ gopkg.in/yaml.v2 v2.4.0
)
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index 54eeacd2b..df224db95 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -1,46 +1,38 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -56,11 +48,9 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 1bc5288b8..41bcda74c 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.10.5"
+const GOMEGA_VERSION = "1.11.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
index 1f9d7a8e6..9ee75a5d5 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -18,23 +18,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
return false, fmt.Errorf("BeElement matcher expects actual to be typed")
}
- length := len(matcher.Elements)
- valueAt := func(i int) interface{} {
- return matcher.Elements[i]
- }
- // Special handling of a single element of type Array or Slice
- if length == 1 && isArrayOrSlice(valueAt(0)) {
- element := valueAt(0)
- value := reflect.ValueOf(element)
- length = value.Len()
- valueAt = func(i int) interface{} {
- return value.Index(i).Interface()
- }
- }
-
var lastError error
- for i := 0; i < length; i++ {
- matcher := &EqualMatcher{Expected: valueAt(i)}
+ for _, m := range flatten(matcher.Elements) {
+ matcher := &EqualMatcher{Expected: m}
success, err := matcher.Match(actual)
if err != nil {
lastError = err
@@ -49,9 +35,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
}
func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be an element of", matcher.Elements)
+ return format.Message(actual, "to be an element of", presentable(matcher.Elements))
}
func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be an element of", matcher.Elements)
+ return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
}
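
With the elements now flattened before comparison, BeElementOf accepts either a variadic list of expected values or a single slice of them. A minimal sketch, with illustrative values:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestBeElementOfForms(t *testing.T) {
	g := NewWithT(t)

	// Variadic form: expected elements are passed directly.
	g.Expect(2).To(BeElementOf(1, 2, 3))

	// Single-slice form: the slice is flattened into its elements.
	g.Expect("b").To(BeElementOf([]string{"a", "b", "c"}))
}
```
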
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 8e58d8a0f..f3dec9101 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -40,15 +40,24 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
- // return error if actual's type is incompatible with Transform function's argument type
- actualType := reflect.TypeOf(actual)
- if !actualType.AssignableTo(m.transformArgType) {
- return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType)
+ // prepare a parameter to pass to the Transform function
+ var param reflect.Value
+ if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
+ // The dynamic type of actual is compatible with the transform argument.
+ param = reflect.ValueOf(actual)
+
+ } else if actual == nil && m.transformArgType.Kind() == reflect.Interface {
+ // The dynamic type of actual is unknown, so there's no way to make its
+ // reflect.Value. Create a nil of the transform argument, which is known.
+ param = reflect.Zero(m.transformArgType)
+
+ } else {
+ return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual)
}
// call the Transform function with `actual`
fn := reflect.ValueOf(m.Transform)
- result := fn.Call([]reflect.Value{reflect.ValueOf(actual)})
+ result := fn.Call([]reflect.Value{param})
m.transformedValue = result[0].Interface() // expect exactly one value
return m.Matcher.Match(m.transformedValue)
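
The rewritten Match above passes a nil actual through as the zero value of an interface-typed transform argument instead of failing on reflect.TypeOf(nil). A hedged sketch of the behavior this enables; the transform and matchers are illustrative:

```go
package example_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

func TestWithTransformNilActual(t *testing.T) {
	g := NewWithT(t)

	// Transform an error into its message text.
	toMsg := func(e error) string {
		if e == nil {
			return ""
		}
		return e.Error()
	}

	// A non-nil actual works as before.
	g.Expect(errors.New("boom")).To(WithTransform(toMsg, Equal("boom")))

	// A nil actual is now handed to the transform as a nil error
	// rather than being rejected with a type mismatch.
	g.Expect(nil).To(WithTransform(toMsg, Equal("")))
}
```
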
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
deleted file mode 100644
index c57100a59..000000000
--- a/vendor/github.com/opentracing/opentracing-go/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
deleted file mode 100644
index b950e4296..000000000
--- a/vendor/github.com/opentracing/opentracing-go/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: go
-
-matrix:
- include:
- - go: "1.13.x"
- - go: "1.14.x"
- - go: "tip"
- env:
- - LINT=true
- - COVERAGE=true
-
-install:
- - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
- - go get -u github.com/stretchr/testify/...
-
-script:
- - make test
- - go build ./...
- - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
- - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
deleted file mode 100644
index d3bfcf623..000000000
--- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
+++ /dev/null
@@ -1,63 +0,0 @@
-Changes by Version
-==================
-
-
-1.2.0 (2020-07-01)
--------------------
-
-* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
-* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
-* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
-* Add Go module support (#215) -- Zaba505
-* Make SetTag helper types in ext public (#229) -- Blake Edwards
-* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
-* Improve noop impementation (#223) -- chanxuehong
-* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
-* Fix typo in comments (#222) -- meteorlxy
-* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
-* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
-
-
-1.1.0 (2019-03-23)
--------------------
-
-Notable changes:
-- The library is now released under Apache 2.0 license
-- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
-- 'golang.org/x/net/context' is replaced with 'context' from the standard library
-
-List of all changes:
-
-- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
-- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
-- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
-- Update license to Apache 2.0 (#181) <Andrea Kao>
-- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
-- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
-- Fix race condition in MockSpan.Context() (#170) <Brad>
-- Add PeerHostIPv4.SetString() (#155) <NeoCN>
-- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
-
-
-1.0.2 (2017-04-26)
--------------------
-
-- Add more semantic tags (#139) <Rustam Zagirov>
-
-
-1.0.1 (2017-02-06)
--------------------
-
-- Correct spelling in comments <Ben Sigelman>
-- Address race in nextMockID() (#123) <bill fumerola>
-- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
-- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
-- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
-- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
-- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
-
-1.0.0 (2016-09-26)
--------------------
-
-- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
-
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
deleted file mode 100644
index 62abb63f5..000000000
--- a/vendor/github.com/opentracing/opentracing-go/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-.DEFAULT_GOAL := test-and-lint
-
-.PHONY: test-and-lint
-test-and-lint: test lint
-
-.PHONY: test
-test:
- go test -v -cover -race ./...
-
-.PHONY: cover
-cover:
- go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
-
-.PHONY: lint
-lint:
- go fmt ./...
- golint ./...
- @# Run again with magic to exit non-zero if golint outputs anything.
- @! (golint ./... | read dummy)
- go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
deleted file mode 100644
index 6ef1d7c9d..000000000
--- a/vendor/github.com/opentracing/opentracing-go/README.md
+++ /dev/null
@@ -1,171 +0,0 @@
-[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
-[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
-
-# OpenTracing API for Go
-
-This package is a Go platform API for OpenTracing.
-
-## Required Reading
-
-In order to understand the Go platform API, one must first be familiar with the
-[OpenTracing project](https://opentracing.io) and
-[terminology](https://opentracing.io/specification/) more specifically.
-
-## API overview for those adding instrumentation
-
-Everyday consumers of this `opentracing` package really only need to worry
-about a couple of key abstractions: the `StartSpan` function, the `Span`
-interface, and binding a `Tracer` at `main()`-time. Here are code snippets
-demonstrating some important use cases.
-
-#### Singleton initialization
-
-The simplest starting point is `./default_tracer.go`. As early as possible, call
-
-```go
- import "github.com/opentracing/opentracing-go"
- import ".../some_tracing_impl"
-
- func main() {
- opentracing.SetGlobalTracer(
- // tracing impl specific:
- some_tracing_impl.New(...),
- )
- ...
- }
-```
-
-#### Non-Singleton initialization
-
-If you prefer direct control to singletons, manage ownership of the
-`opentracing.Tracer` implementation explicitly.
-
-#### Creating a Span given an existing Go `context.Context`
-
-If you use `context.Context` in your application, OpenTracing's Go library will
-happily rely on it for `Span` propagation. To start a new (blocking child)
-`Span`, you can use `StartSpanFromContext`.
-
-```go
- func xyz(ctx context.Context, ...) {
- ...
- span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
- defer span.Finish()
- span.LogFields(
- log.String("event", "soft error"),
- log.String("type", "cache timeout"),
- log.Int("waited.millis", 1500))
- ...
- }
-```
-
-#### Starting an empty trace by creating a "root span"
-
-It's always possible to create a "root" `Span` with no parent or other causal
-reference.
-
-```go
- func xyz() {
- ...
- sp := opentracing.StartSpan("operation_name")
- defer sp.Finish()
- ...
- }
-```
-
-#### Creating a (child) Span given an existing (parent) Span
-
-```go
- func xyz(parentSpan opentracing.Span, ...) {
- ...
- sp := opentracing.StartSpan(
- "operation_name",
- opentracing.ChildOf(parentSpan.Context()))
- defer sp.Finish()
- ...
- }
-```
-
-#### Serializing to the wire
-
-```go
- func makeSomeRequest(ctx context.Context) ... {
- if span := opentracing.SpanFromContext(ctx); span != nil {
- httpClient := &http.Client{}
- httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
-
- // Transmit the span's TraceContext as HTTP headers on our
- // outbound request.
- opentracing.GlobalTracer().Inject(
- span.Context(),
- opentracing.HTTPHeaders,
- opentracing.HTTPHeadersCarrier(httpReq.Header))
-
- resp, err := httpClient.Do(httpReq)
- ...
- }
- ...
- }
-```
-
-#### Deserializing from the wire
-
-```go
- http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
- var serverSpan opentracing.Span
- appSpecificOperationName := ...
- wireContext, err := opentracing.GlobalTracer().Extract(
- opentracing.HTTPHeaders,
- opentracing.HTTPHeadersCarrier(req.Header))
- if err != nil {
- // Optionally record something about err here
- }
-
- // Create the span referring to the RPC client if available.
- // If wireContext == nil, a root span will be created.
- serverSpan = opentracing.StartSpan(
- appSpecificOperationName,
- ext.RPCServerOption(wireContext))
-
- defer serverSpan.Finish()
-
- ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
- ...
- }
-```
-
-#### Conditionally capture a field using `log.Noop`
-
-In some situations, you may want to dynamically decide whether or not
-to log a field. For example, you may want to capture additional data,
-such as a customer ID, in non-production environments:
-
-```go
- func Customer(order *Order) log.Field {
- if os.Getenv("ENVIRONMENT") == "dev" {
- return log.String("customer", order.Customer.ID)
- }
- return log.Noop()
- }
-```
-
-#### Goroutine-safety
-
-The entire public API is goroutine-safe and does not require external
-synchronization.
-
-## API pointers for those implementing a tracing system
-
-Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
-
-## API compatibility
-
-For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
-
-## Tracer test suite
-
-A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
-
-## Licensing
-
-[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
deleted file mode 100644
index e11977ebe..000000000
--- a/vendor/github.com/opentracing/opentracing-go/ext.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package opentracing
-
-import (
- "context"
-)
-
-// TracerContextWithSpanExtension is an extension interface that the
-// implementation of the Tracer interface may want to implement. It
-// allows to have some control over the go context when the
-// ContextWithSpan is invoked.
-//
-// The primary purpose of this extension are adapters from opentracing
-// API to some other tracing API.
-type TracerContextWithSpanExtension interface {
- // ContextWithSpanHook gets called by the ContextWithSpan
- // function, when the Tracer implementation also implements
- // this interface. It allows to put extra information into the
- // context and make it available to the callers of the
- // ContextWithSpan.
- //
- // This hook is invoked before the ContextWithSpan function
- // actually puts the span into the context.
- ContextWithSpanHook(ctx context.Context, span Span) context.Context
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
deleted file mode 100644
index 8282bd758..000000000
--- a/vendor/github.com/opentracing/opentracing-go/ext/field.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package ext
-
-import (
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/log"
-)
-
-// LogError sets the error=true tag on the Span and logs err as an "error" event.
-func LogError(span opentracing.Span, err error, fields ...log.Field) {
- Error.Set(span, true)
- ef := []log.Field{
- log.Event("error"),
- log.Error(err),
- }
- ef = append(ef, fields...)
- span.LogFields(ef...)
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
deleted file mode 100644
index a414b5951..000000000
--- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package ext
-
-import "github.com/opentracing/opentracing-go"
-
-// These constants define common tag names recommended for better portability across
-// tracing systems and languages/platforms.
-//
-// The tag names are defined as typed strings, so that in addition to the usual use
-//
-// span.setTag(TagName, value)
-//
-// they also support value type validation via this additional syntax:
-//
-// TagName.Set(span, value)
-//
-var (
- //////////////////////////////////////////////////////////////////////
- // SpanKind (client/server or producer/consumer)
- //////////////////////////////////////////////////////////////////////
-
- // SpanKind hints at relationship between spans, e.g. client/server
- SpanKind = spanKindTagName("span.kind")
-
- // SpanKindRPCClient marks a span representing the client-side of an RPC
- // or other remote call
- SpanKindRPCClientEnum = SpanKindEnum("client")
- SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
-
- // SpanKindRPCServer marks a span representing the server-side of an RPC
- // or other remote call
- SpanKindRPCServerEnum = SpanKindEnum("server")
- SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
-
- // SpanKindProducer marks a span representing the producer-side of a
- // message bus
- SpanKindProducerEnum = SpanKindEnum("producer")
- SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
-
- // SpanKindConsumer marks a span representing the consumer-side of a
- // message bus
- SpanKindConsumerEnum = SpanKindEnum("consumer")
- SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
-
- //////////////////////////////////////////////////////////////////////
- // Component name
- //////////////////////////////////////////////////////////////////////
-
- // Component is a low-cardinality identifier of the module, library,
- // or package that is generating a span.
- Component = StringTagName("component")
-
- //////////////////////////////////////////////////////////////////////
- // Sampling hint
- //////////////////////////////////////////////////////////////////////
-
- // SamplingPriority determines the priority of sampling this Span.
- SamplingPriority = Uint16TagName("sampling.priority")
-
- //////////////////////////////////////////////////////////////////////
- // Peer tags. These tags can be emitted by either client-side or
- // server-side to describe the other side/service in a peer-to-peer
- // communications, like an RPC call.
- //////////////////////////////////////////////////////////////////////
-
- // PeerService records the service name of the peer.
- PeerService = StringTagName("peer.service")
-
- // PeerAddress records the address name of the peer. This may be a "ip:port",
- // a bare "hostname", a FQDN or even a database DSN substring
- // like "mysql://username@127.0.0.1:3306/dbname"
- PeerAddress = StringTagName("peer.address")
-
- // PeerHostname records the host name of the peer
- PeerHostname = StringTagName("peer.hostname")
-
- // PeerHostIPv4 records IP v4 host address of the peer
- PeerHostIPv4 = IPv4TagName("peer.ipv4")
-
- // PeerHostIPv6 records IP v6 host address of the peer
- PeerHostIPv6 = StringTagName("peer.ipv6")
-
- // PeerPort records port number of the peer
- PeerPort = Uint16TagName("peer.port")
-
- //////////////////////////////////////////////////////////////////////
- // HTTP Tags
- //////////////////////////////////////////////////////////////////////
-
- // HTTPUrl should be the URL of the request being handled in this segment
- // of the trace, in standard URI format. The protocol is optional.
- HTTPUrl = StringTagName("http.url")
-
- // HTTPMethod is the HTTP method of the request, and is case-insensitive.
- HTTPMethod = StringTagName("http.method")
-
- // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
- // HTTP response.
- HTTPStatusCode = Uint16TagName("http.status_code")
-
- //////////////////////////////////////////////////////////////////////
- // DB Tags
- //////////////////////////////////////////////////////////////////////
-
- // DBInstance is database instance name.
- DBInstance = StringTagName("db.instance")
-
- // DBStatement is a database statement for the given database type.
- // It can be a query or a prepared statement (i.e., before substitution).
- DBStatement = StringTagName("db.statement")
-
- // DBType is a database type. For any SQL database, "sql".
- // For others, the lower-case database category, e.g. "redis"
- DBType = StringTagName("db.type")
-
- // DBUser is a username for accessing database.
- DBUser = StringTagName("db.user")
-
- //////////////////////////////////////////////////////////////////////
- // Message Bus Tag
- //////////////////////////////////////////////////////////////////////
-
- // MessageBusDestination is an address at which messages can be exchanged
- MessageBusDestination = StringTagName("message_bus.destination")
-
- //////////////////////////////////////////////////////////////////////
- // Error Tag
- //////////////////////////////////////////////////////////////////////
-
- // Error indicates that operation represented by the span resulted in an error.
- Error = BoolTagName("error")
-)
-
-// ---
-
-// SpanKindEnum represents common span types
-type SpanKindEnum string
-
-type spanKindTagName string
-
-// Set adds a string tag to the `span`
-func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
- span.SetTag(string(tag), value)
-}
-
-type rpcServerOption struct {
- clientContext opentracing.SpanContext
-}
-
-func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
- if r.clientContext != nil {
- opentracing.ChildOf(r.clientContext).Apply(o)
- }
- SpanKindRPCServer.Apply(o)
-}
-
-// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
-// with `client` representing the metadata for the remote peer Span if available.
-// In case client == nil, due to the client not being instrumented, this RPC
-// server span will be a root span.
-func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
- return rpcServerOption{client}
-}
-
-// ---
-
-// StringTagName is a common tag name to be set to a string value
-type StringTagName string
-
-// Set adds a string tag to the `span`
-func (tag StringTagName) Set(span opentracing.Span, value string) {
- span.SetTag(string(tag), value)
-}
-
-// ---
-
-// Uint32TagName is a common tag name to be set to a uint32 value
-type Uint32TagName string
-
-// Set adds a uint32 tag to the `span`
-func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
- span.SetTag(string(tag), value)
-}
-
-// ---
-
-// Uint16TagName is a common tag name to be set to a uint16 value
-type Uint16TagName string
-
-// Set adds a uint16 tag to the `span`
-func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
- span.SetTag(string(tag), value)
-}
-
-// ---
-
-// BoolTagName is a common tag name to be set to a bool value
-type BoolTagName string
-
-// Set adds a bool tag to the `span`
-func (tag BoolTagName) Set(span opentracing.Span, value bool) {
- span.SetTag(string(tag), value)
-}
-
-// IPv4TagName is a common tag name to be set to an ipv4 value
-type IPv4TagName string
-
-// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility
-func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
- span.SetTag(string(tag), value)
-}
-
-// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1"
-func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
- span.SetTag(string(tag), value)
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
deleted file mode 100644
index 4f7066a92..000000000
--- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package opentracing
-
-type registeredTracer struct {
- tracer Tracer
- isRegistered bool
-}
-
-var (
- globalTracer = registeredTracer{NoopTracer{}, false}
-)
-
-// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
-// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
-// opentracing.Tracer instance) should call SetGlobalTracer as early as
-// possible in main(), prior to calling the `StartSpan` global func below.
-// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
-// (etc) globals are noops.
-func SetGlobalTracer(tracer Tracer) {
- globalTracer = registeredTracer{tracer, true}
-}
-
-// GlobalTracer returns the global singleton `Tracer` implementation.
-// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
-// implementation that drops all data handed to it.
-func GlobalTracer() Tracer {
- return globalTracer.tracer
-}
-
-// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
-func StartSpan(operationName string, opts ...StartSpanOption) Span {
- return globalTracer.tracer.StartSpan(operationName, opts...)
-}
-
-// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
-func InitGlobalTracer(tracer Tracer) {
- SetGlobalTracer(tracer)
-}
-
-// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
-func IsGlobalTracerRegistered() bool {
- return globalTracer.isRegistered
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod
deleted file mode 100644
index bf48bb5d7..000000000
--- a/vendor/github.com/opentracing/opentracing-go/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/opentracing/opentracing-go
-
-go 1.14
-
-require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum
deleted file mode 100644
index 4347755af..000000000
--- a/vendor/github.com/opentracing/opentracing-go/go.sum
+++ /dev/null
@@ -1,7 +0,0 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
deleted file mode 100644
index 1831bc9b2..000000000
--- a/vendor/github.com/opentracing/opentracing-go/gocontext.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package opentracing
-
-import "context"
-
-type contextKey struct{}
-
-var activeSpanKey = contextKey{}
-
-// ContextWithSpan returns a new `context.Context` that holds a reference to
-// the span. If span is nil, a new context without an active span is returned.
-func ContextWithSpan(ctx context.Context, span Span) context.Context {
- if span != nil {
- if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
- ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
- }
- }
- return context.WithValue(ctx, activeSpanKey, span)
-}
-
-// SpanFromContext returns the `Span` previously associated with `ctx`, or
-// `nil` if no such `Span` could be found.
-//
-// NOTE: context.Context != SpanContext: the former is Go's intra-process
-// context propagation mechanism, and the latter houses OpenTracing's per-Span
-// identity and baggage information.
-func SpanFromContext(ctx context.Context) Span {
- val := ctx.Value(activeSpanKey)
- if sp, ok := val.(Span); ok {
- return sp
- }
- return nil
-}
-
-// StartSpanFromContext starts and returns a Span with `operationName`, using
-// any Span found within `ctx` as a ChildOfRef. If no such parent could be
-// found, StartSpanFromContext creates a root (parentless) Span.
-//
-// The second return value is a context.Context object built around the
-// returned Span.
-//
-// Example usage:
-//
-// SomeFunction(ctx context.Context, ...) {
-// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
-// defer sp.Finish()
-// ...
-// }
-func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
- return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
-}
-
-// StartSpanFromContextWithTracer starts and returns a span with `operationName`
-// using a span found within the context as a ChildOfRef. If that doesn't exist
-// it creates a root span. It also returns a context.Context object built
-// around the returned span.
-//
-// It's behavior is identical to StartSpanFromContext except that it takes an explicit
-// tracer as opposed to using the global tracer.
-func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
- if parentSpan := SpanFromContext(ctx); parentSpan != nil {
- opts = append(opts, ChildOf(parentSpan.Context()))
- }
- span := tracer.StartSpan(operationName, opts...)
- return span, ContextWithSpan(ctx, span)
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
deleted file mode 100644
index f222ded79..000000000
--- a/vendor/github.com/opentracing/opentracing-go/log/field.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package log
-
-import (
- "fmt"
- "math"
-)
-
-type fieldType int
-
-const (
- stringType fieldType = iota
- boolType
- intType
- int32Type
- uint32Type
- int64Type
- uint64Type
- float32Type
- float64Type
- errorType
- objectType
- lazyLoggerType
- noopType
-)
-
-// Field instances are constructed via LogBool, LogString, and so on.
-// Tracing implementations may then handle them via the Field.Marshal
-// method.
-//
-// "heavily influenced by" (i.e., partially stolen from)
-// https://github.com/uber-go/zap
-type Field struct {
- key string
- fieldType fieldType
- numericVal int64
- stringVal string
- interfaceVal interface{}
-}
-
-// String adds a string-valued key:value pair to a Span.LogFields() record
-func String(key, val string) Field {
- return Field{
- key: key,
- fieldType: stringType,
- stringVal: val,
- }
-}
-
-// Bool adds a bool-valued key:value pair to a Span.LogFields() record
-func Bool(key string, val bool) Field {
- var numericVal int64
- if val {
- numericVal = 1
- }
- return Field{
- key: key,
- fieldType: boolType,
- numericVal: numericVal,
- }
-}
-
-// Int adds an int-valued key:value pair to a Span.LogFields() record
-func Int(key string, val int) Field {
- return Field{
- key: key,
- fieldType: intType,
- numericVal: int64(val),
- }
-}
-
-// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
-func Int32(key string, val int32) Field {
- return Field{
- key: key,
- fieldType: int32Type,
- numericVal: int64(val),
- }
-}
-
-// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
-func Int64(key string, val int64) Field {
- return Field{
- key: key,
- fieldType: int64Type,
- numericVal: val,
- }
-}
-
-// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
-func Uint32(key string, val uint32) Field {
- return Field{
- key: key,
- fieldType: uint32Type,
- numericVal: int64(val),
- }
-}
-
-// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
-func Uint64(key string, val uint64) Field {
- return Field{
- key: key,
- fieldType: uint64Type,
- numericVal: int64(val),
- }
-}
-
-// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
-func Float32(key string, val float32) Field {
- return Field{
- key: key,
- fieldType: float32Type,
- numericVal: int64(math.Float32bits(val)),
- }
-}
-
-// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
-func Float64(key string, val float64) Field {
- return Field{
- key: key,
- fieldType: float64Type,
- numericVal: int64(math.Float64bits(val)),
- }
-}
-
-// Error adds an error with the key "error.object" to a Span.LogFields() record
-func Error(err error) Field {
- return Field{
- key: "error.object",
- fieldType: errorType,
- interfaceVal: err,
- }
-}
-
-// Object adds an object-valued key:value pair to a Span.LogFields() record
-// Please pass in an immutable object, otherwise there may be concurrency issues.
-// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write".
-// Because span is sent asynchronously, it is possible that this map will also be modified.
-func Object(key string, obj interface{}) Field {
- return Field{
- key: key,
- fieldType: objectType,
- interfaceVal: obj,
- }
-}
-
-// Event creates a string-valued Field for span logs with key="event" and value=val.
-func Event(val string) Field {
- return String("event", val)
-}
-
-// Message creates a string-valued Field for span logs with key="message" and value=val.
-func Message(val string) Field {
- return String("message", val)
-}
-
-// LazyLogger allows for user-defined, late-bound logging of arbitrary data
-type LazyLogger func(fv Encoder)
-
-// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
-// implementation will call the LazyLogger function at an indefinite time in
-// the future (after Lazy() returns).
-func Lazy(ll LazyLogger) Field {
- return Field{
- fieldType: lazyLoggerType,
- interfaceVal: ll,
- }
-}
-
-// Noop creates a no-op log field that should be ignored by the tracer.
-// It can be used to capture optional fields, for example those that should
-// only be logged in non-production environment:
-//
-// func customerField(order *Order) log.Field {
-// if os.Getenv("ENVIRONMENT") == "dev" {
-// return log.String("customer", order.Customer.ID)
-// }
-// return log.Noop()
-// }
-//
-// span.LogFields(log.String("event", "purchase"), customerField(order))
-//
-func Noop() Field {
- return Field{
- fieldType: noopType,
- }
-}
-
-// Encoder allows access to the contents of a Field (via a call to
-// Field.Marshal).
-//
-// Tracer implementations typically provide an implementation of Encoder;
-// OpenTracing callers typically do not need to concern themselves with it.
-type Encoder interface {
- EmitString(key, value string)
- EmitBool(key string, value bool)
- EmitInt(key string, value int)
- EmitInt32(key string, value int32)
- EmitInt64(key string, value int64)
- EmitUint32(key string, value uint32)
- EmitUint64(key string, value uint64)
- EmitFloat32(key string, value float32)
- EmitFloat64(key string, value float64)
- EmitObject(key string, value interface{})
- EmitLazyLogger(value LazyLogger)
-}
-
-// Marshal passes a Field instance through to the appropriate
-// field-type-specific method of an Encoder.
-func (lf Field) Marshal(visitor Encoder) {
- switch lf.fieldType {
- case stringType:
- visitor.EmitString(lf.key, lf.stringVal)
- case boolType:
- visitor.EmitBool(lf.key, lf.numericVal != 0)
- case intType:
- visitor.EmitInt(lf.key, int(lf.numericVal))
- case int32Type:
- visitor.EmitInt32(lf.key, int32(lf.numericVal))
- case int64Type:
- visitor.EmitInt64(lf.key, int64(lf.numericVal))
- case uint32Type:
- visitor.EmitUint32(lf.key, uint32(lf.numericVal))
- case uint64Type:
- visitor.EmitUint64(lf.key, uint64(lf.numericVal))
- case float32Type:
- visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
- case float64Type:
- visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
- case errorType:
- if err, ok := lf.interfaceVal.(error); ok {
- visitor.EmitString(lf.key, err.Error())
- } else {
- visitor.EmitString(lf.key, "<nil>")
- }
- case objectType:
- visitor.EmitObject(lf.key, lf.interfaceVal)
- case lazyLoggerType:
- visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
- case noopType:
- // intentionally left blank
- }
-}
-
-// Key returns the field's key.
-func (lf Field) Key() string {
- return lf.key
-}
-
-// Value returns the field's value as interface{}.
-func (lf Field) Value() interface{} {
- switch lf.fieldType {
- case stringType:
- return lf.stringVal
- case boolType:
- return lf.numericVal != 0
- case intType:
- return int(lf.numericVal)
- case int32Type:
- return int32(lf.numericVal)
- case int64Type:
- return int64(lf.numericVal)
- case uint32Type:
- return uint32(lf.numericVal)
- case uint64Type:
- return uint64(lf.numericVal)
- case float32Type:
- return math.Float32frombits(uint32(lf.numericVal))
- case float64Type:
- return math.Float64frombits(uint64(lf.numericVal))
- case errorType, objectType, lazyLoggerType:
- return lf.interfaceVal
- case noopType:
- return nil
- default:
- return nil
- }
-}
-
-// String returns a string representation of the key and value.
-func (lf Field) String() string {
- return fmt.Sprint(lf.key, ":", lf.Value())
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
deleted file mode 100644
index d57e28aa5..000000000
--- a/vendor/github.com/opentracing/opentracing-go/log/util.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package log
-
-import (
- "fmt"
- "reflect"
-)
-
-// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
-// a la Span.LogFields().
-func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
- if len(keyValues)%2 != 0 {
- return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
- }
- fields := make([]Field, len(keyValues)/2)
- for i := 0; i*2 < len(keyValues); i++ {
- key, ok := keyValues[i*2].(string)
- if !ok {
- return nil, fmt.Errorf(
- "non-string key (pair #%d): %T",
- i, keyValues[i*2])
- }
- switch typedVal := keyValues[i*2+1].(type) {
- case bool:
- fields[i] = Bool(key, typedVal)
- case string:
- fields[i] = String(key, typedVal)
- case int:
- fields[i] = Int(key, typedVal)
- case int8:
- fields[i] = Int32(key, int32(typedVal))
- case int16:
- fields[i] = Int32(key, int32(typedVal))
- case int32:
- fields[i] = Int32(key, typedVal)
- case int64:
- fields[i] = Int64(key, typedVal)
- case uint:
- fields[i] = Uint64(key, uint64(typedVal))
- case uint64:
- fields[i] = Uint64(key, typedVal)
- case uint8:
- fields[i] = Uint32(key, uint32(typedVal))
- case uint16:
- fields[i] = Uint32(key, uint32(typedVal))
- case uint32:
- fields[i] = Uint32(key, typedVal)
- case float32:
- fields[i] = Float32(key, typedVal)
- case float64:
- fields[i] = Float64(key, typedVal)
- default:
- if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
- fields[i] = String(key, "nil")
- continue
- }
- // When in doubt, coerce to a string
- fields[i] = String(key, fmt.Sprint(typedVal))
- }
- }
- return fields, nil
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
deleted file mode 100644
index f9b680a21..000000000
--- a/vendor/github.com/opentracing/opentracing-go/noop.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package opentracing
-
-import "github.com/opentracing/opentracing-go/log"
-
-// A NoopTracer is a trivial, minimum overhead implementation of Tracer
-// for which all operations are no-ops.
-//
-// The primary use of this implementation is in libraries, such as RPC
-// frameworks, that make tracing an optional feature controlled by the
-// end user. A no-op implementation allows said libraries to use it
-// as the default Tracer and to write instrumentation that does
-// not need to keep checking if the tracer instance is nil.
-//
-// For the same reason, the NoopTracer is the default "global" tracer
-// (see GlobalTracer and SetGlobalTracer functions).
-//
-// WARNING: NoopTracer does not support baggage propagation.
-type NoopTracer struct{}
-
-type noopSpan struct{}
-type noopSpanContext struct{}
-
-var (
- defaultNoopSpanContext SpanContext = noopSpanContext{}
- defaultNoopSpan Span = noopSpan{}
- defaultNoopTracer Tracer = NoopTracer{}
-)
-
-const (
- emptyString = ""
-)
-
-// noopSpanContext:
-func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
-
-// noopSpan:
-func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
-func (n noopSpan) SetBaggageItem(key, val string) Span { return n }
-func (n noopSpan) BaggageItem(key string) string { return emptyString }
-func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
-func (n noopSpan) LogFields(fields ...log.Field) {}
-func (n noopSpan) LogKV(keyVals ...interface{}) {}
-func (n noopSpan) Finish() {}
-func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
-func (n noopSpan) SetOperationName(operationName string) Span { return n }
-func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
-func (n noopSpan) LogEvent(event string) {}
-func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
-func (n noopSpan) Log(data LogData) {}
-
-// StartSpan belongs to the Tracer interface.
-func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
- return defaultNoopSpan
-}
-
-// Inject belongs to the Tracer interface.
-func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
- return nil
-}
-
-// Extract belongs to the Tracer interface.
-func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
- return nil, ErrSpanContextNotFound
-}
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
deleted file mode 100644
index b0c275eb0..000000000
--- a/vendor/github.com/opentracing/opentracing-go/propagation.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package opentracing
-
-import (
- "errors"
- "net/http"
-)
-
-///////////////////////////////////////////////////////////////////////////////
-// CORE PROPAGATION INTERFACES:
-///////////////////////////////////////////////////////////////////////////////
-
-var (
- // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
- // Tracer.Extract() is not recognized by the Tracer implementation.
- ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
-
- // ErrSpanContextNotFound occurs when the `carrier` passed to
- // Tracer.Extract() is valid and uncorrupted but has insufficient
- // information to extract a SpanContext.
- ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
-
- // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
- // operate on a SpanContext which it is not prepared to handle (for
- // example, since it was created by a different tracer implementation).
- ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
-
- // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
- // implementations expect a different type of `carrier` than they are
- // given.
- ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
-
- // ErrSpanContextCorrupted occurs when the `carrier` passed to
- // Tracer.Extract() is of the expected type but is corrupted.
- ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
-)
-
-///////////////////////////////////////////////////////////////////////////////
-// BUILTIN PROPAGATION FORMATS:
-///////////////////////////////////////////////////////////////////////////////
-
-// BuiltinFormat is used to demarcate the values within package `opentracing`
-// that are intended for use with the Tracer.Inject() and Tracer.Extract()
-// methods.
-type BuiltinFormat byte
-
-const (
- // Binary represents SpanContexts as opaque binary data.
- //
- // For Tracer.Inject(): the carrier must be an `io.Writer`.
- //
- // For Tracer.Extract(): the carrier must be an `io.Reader`.
- Binary BuiltinFormat = iota
-
- // TextMap represents SpanContexts as key:value string pairs.
- //
- // Unlike HTTPHeaders, the TextMap format does not restrict the key or
- // value character sets in any way.
- //
- // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
- //
- // For Tracer.Extract(): the carrier must be a `TextMapReader`.
- TextMap
-
- // HTTPHeaders represents SpanContexts as HTTP header string pairs.
- //
- // Unlike TextMap, the HTTPHeaders format requires that the keys and values
- // be valid as HTTP headers as-is (i.e., character casing may be unstable
- // and special characters are disallowed in keys, values should be
- // URL-escaped, etc).
- //
- // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
- //
- // For Tracer.Extract(): the carrier must be a `TextMapReader`.
- //
- // See HTTPHeadersCarrier for an implementation of both TextMapWriter
- // and TextMapReader that defers to an http.Header instance for storage.
- // For example, Inject():
- //
- // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
- // err := span.Tracer().Inject(
- // span.Context(), opentracing.HTTPHeaders, carrier)
- //
- // Or Extract():
- //
- // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
- // clientContext, err := tracer.Extract(
- // opentracing.HTTPHeaders, carrier)
- //
- HTTPHeaders
-)
-
-// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
-// it, the caller can encode a SpanContext for propagation as entries in a map
-// of unicode strings.
-type TextMapWriter interface {
- // Set a key:value pair to the carrier. Multiple calls to Set() for the
- // same key leads to undefined behavior.
- //
- // NOTE: The backing store for the TextMapWriter may contain data unrelated
- // to SpanContext. As such, Inject() and Extract() implementations that
- // call the TextMapWriter and TextMapReader interfaces must agree on a
- // prefix or other convention to distinguish their own key:value pairs.
- Set(key, val string)
-}
-
-// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
-// the caller can decode a propagated SpanContext as entries in a map of
-// unicode strings.
-type TextMapReader interface {
- // ForeachKey returns TextMap contents via repeated calls to the `handler`
- // function. If any call to `handler` returns a non-nil error, ForeachKey
- // terminates and returns that error.
- //
- // NOTE: The backing store for the TextMapReader may contain data unrelated
- // to SpanContext. As such, Inject() and Extract() implementations that
- // call the TextMapWriter and TextMapReader interfaces must agree on a
- // prefix or other convention to distinguish their own key:value pairs.
- //
- // The "foreach" callback pattern reduces unnecessary copying in some cases
- // and also allows implementations to hold locks while the map is read.
- ForeachKey(handler func(key, val string) error) error
-}
-
-// TextMapCarrier allows the use of regular map[string]string
-// as both TextMapWriter and TextMapReader.
-type TextMapCarrier map[string]string
-
-// ForeachKey conforms to the TextMapReader interface.
-func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
- for k, v := range c {
- if err := handler(k, v); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Set implements Set() of opentracing.TextMapWriter
-func (c TextMapCarrier) Set(key, val string) {
- c[key] = val
-}
-
-// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
-//
-// Example usage for server side:
-//
-// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
-// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
-//
-// Example usage for client side:
-//
-// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
-// err := tracer.Inject(
-// span.Context(),
-// opentracing.HTTPHeaders,
-// carrier)
-//
-type HTTPHeadersCarrier http.Header
-
-// Set conforms to the TextMapWriter interface.
-func (c HTTPHeadersCarrier) Set(key, val string) {
- h := http.Header(c)
- h.Set(key, val)
-}
-
-// ForeachKey conforms to the TextMapReader interface.
-func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
- for k, vals := range c {
- for _, v := range vals {
- if err := handler(k, v); err != nil {
- return err
- }
- }
- }
- return nil
-}
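For reference, the carrier types removed above pair Tracer.Inject() on the sending side with Tracer.Extract() on the receiving side. A minimal round trip over HTTP headers, assuming any opentracing-go Tracer implementation, could look like the following sketch; the function name propagateOverHeaders is a placeholder, not part of the vendored code.

package example

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

// propagateOverHeaders round-trips a SpanContext through an http.Header,
// mirroring the HTTPHeadersCarrier example in the deleted propagation.go.
func propagateOverHeaders(span opentracing.Span, req *http.Request) (opentracing.SpanContext, error) {
	tracer := span.Tracer()

	// Sending side: encode the SpanContext into the outgoing request headers.
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		return nil, err
	}

	// Receiving side: decode the propagated SpanContext from those headers.
	return tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header))
}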
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
deleted file mode 100644
index 0d3fb5341..000000000
--- a/vendor/github.com/opentracing/opentracing-go/span.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package opentracing
-
-import (
- "time"
-
- "github.com/opentracing/opentracing-go/log"
-)
-
-// SpanContext represents Span state that must propagate to descendant Spans and across process
-// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
-type SpanContext interface {
- // ForeachBaggageItem grants access to all baggage items stored in the
- // SpanContext.
- // The handler function will be called for each baggage key/value pair.
- // The ordering of items is not guaranteed.
- //
- // The bool return value indicates if the handler wants to continue iterating
- // through the rest of the baggage items; for example if the handler is trying to
- // find some baggage item by pattern matching the name, it can return false
- // as soon as the item is found to stop further iterations.
- ForeachBaggageItem(handler func(k, v string) bool)
-}
-
-// Span represents an active, un-finished span in the OpenTracing system.
-//
-// Spans are created by the Tracer interface.
-type Span interface {
- // Sets the end timestamp and finalizes Span state.
- //
- // With the exception of calls to Context() (which are always allowed),
- // Finish() must be the last call made to any span instance, and to do
- // otherwise leads to undefined behavior.
- Finish()
- // FinishWithOptions is like Finish() but with explicit control over
- // timestamps and log data.
- FinishWithOptions(opts FinishOptions)
-
- // Context() yields the SpanContext for this Span. Note that the return
- // value of Context() is still valid after a call to Span.Finish(), as is
- // a call to Span.Context() after a call to Span.Finish().
- Context() SpanContext
-
- // Sets or changes the operation name.
- //
- // Returns a reference to this Span for chaining.
- SetOperationName(operationName string) Span
-
- // Adds a tag to the span.
- //
- // If there is a pre-existing tag set for `key`, it is overwritten.
- //
- // Tag values can be numeric types, strings, or bools. The behavior of
- // other tag value types is undefined at the OpenTracing level. If a
- // tracing system does not know how to handle a particular value type, it
- // may ignore the tag, but shall not panic.
- //
- // Returns a reference to this Span for chaining.
- SetTag(key string, value interface{}) Span
-
- // LogFields is an efficient and type-checked way to record key:value
- // logging data about a Span, though the programming interface is a little
- // more verbose than LogKV(). Here's an example:
- //
- // span.LogFields(
- // log.String("event", "soft error"),
- // log.String("type", "cache timeout"),
- // log.Int("waited.millis", 1500))
- //
- // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
- LogFields(fields ...log.Field)
-
- // LogKV is a concise, readable way to record key:value logging data about
- // a Span, though unfortunately this also makes it less efficient and less
- // type-safe than LogFields(). Here's an example:
- //
- // span.LogKV(
- // "event", "soft error",
- // "type", "cache timeout",
- // "waited.millis", 1500)
- //
- // For LogKV (as opposed to LogFields()), the parameters must appear as
- // key-value pairs, like
- //
- // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
- //
- // The keys must all be strings. The values may be strings, numeric types,
- // bools, Go error instances, or arbitrary structs.
- //
- // (Note to implementors: consider the log.InterleavedKVToFields() helper)
- LogKV(alternatingKeyValues ...interface{})
-
- // SetBaggageItem sets a key:value pair on this Span and its SpanContext
- // that also propagates to descendants of this Span.
- //
- // SetBaggageItem() enables powerful functionality given a full-stack
- // opentracing integration (e.g., arbitrary application data from a mobile
- // app can make it, transparently, all the way into the depths of a storage
- // system), and with it some powerful costs: use this feature with care.
- //
- // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
- // *future* causal descendants of the associated Span.
- //
- // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
- // value is copied into every local *and remote* child of the associated
- // Span, and that can add up to a lot of network and cpu overhead.
- //
- // Returns a reference to this Span for chaining.
- SetBaggageItem(restrictedKey, value string) Span
-
- // Gets the value for a baggage item given its key. Returns the empty string
- // if the value isn't found in this Span.
- BaggageItem(restrictedKey string) string
-
- // Provides access to the Tracer that created this Span.
- Tracer() Tracer
-
- // Deprecated: use LogFields or LogKV
- LogEvent(event string)
- // Deprecated: use LogFields or LogKV
- LogEventWithPayload(event string, payload interface{})
- // Deprecated: use LogFields or LogKV
- Log(data LogData)
-}
-
-// LogRecord is data associated with a single Span log. Every LogRecord
-// instance must specify at least one Field.
-type LogRecord struct {
- Timestamp time.Time
- Fields []log.Field
-}
-
-// FinishOptions allows Span.FinishWithOptions callers to override the finish
-// timestamp and provide log data via a bulk interface.
-type FinishOptions struct {
- // FinishTime overrides the Span's finish time, or implicitly becomes
- // time.Now() if FinishTime.IsZero().
- //
- // FinishTime must resolve to a timestamp that's >= the Span's StartTime
- // (per StartSpanOptions).
- FinishTime time.Time
-
- // LogRecords allows the caller to specify the contents of many LogFields()
- // calls with a single slice. May be nil.
- //
- // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
- // be set explicitly). Also, they must be >= the Span's start timestamp and
- // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
- // behavior of FinishWithOptions() is undefined.
- //
- // If specified, the caller hands off ownership of LogRecords at
- // FinishWithOptions() invocation time.
- //
- // If specified, the (deprecated) BulkLogData must be nil or empty.
- LogRecords []LogRecord
-
- // BulkLogData is DEPRECATED.
- BulkLogData []LogData
-}
-
-// LogData is DEPRECATED
-type LogData struct {
- Timestamp time.Time
- Event string
- Payload interface{}
-}
-
-// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
-func (ld *LogData) ToLogRecord() LogRecord {
- var literalTimestamp time.Time
- if ld.Timestamp.IsZero() {
- literalTimestamp = time.Now()
- } else {
- literalTimestamp = ld.Timestamp
- }
- rval := LogRecord{
- Timestamp: literalTimestamp,
- }
- if ld.Payload == nil {
- rval.Fields = []log.Field{
- log.String("event", ld.Event),
- }
- } else {
- rval.Fields = []log.Field{
- log.String("event", ld.Event),
- log.Object("payload", ld.Payload),
- }
- }
- return rval
-}
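The Span interface removed above is exercised roughly as in this sketch: tags, structured logs, baggage, and a mandatory Finish() as the last call. The helper name annotateSpan and the tag/log values are illustrative only.

package example

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

// annotateSpan sketches the Span surface defined in the deleted span.go.
func annotateSpan(span opentracing.Span) {
	defer span.Finish() // Finish() must be the last call made on the span.

	span.SetOperationName("GetFeed")
	span.SetTag("user_agent", "example/1.0")

	// Type-checked logging via LogFields ...
	span.LogFields(
		log.String("event", "soft error"),
		log.Int("waited.millis", 1500),
	)
	// ... or the more concise (but less type-safe) LogKV form.
	span.LogKV("event", "retry", "attempt", 2)

	// Baggage propagates to all future causal descendants of this span.
	span.SetBaggageItem("request-id", "abc123")
	_ = span.BaggageItem("request-id")
}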
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
deleted file mode 100644
index 715f0cedf..000000000
--- a/vendor/github.com/opentracing/opentracing-go/tracer.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package opentracing
-
-import "time"
-
-// Tracer is a simple, thin interface for Span creation and SpanContext
-// propagation.
-type Tracer interface {
-
- // Create, start, and return a new Span with the given `operationName` and
- // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
- // from the "functional options" pattern, per
- // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
- //
- // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
- // opentracing.FollowsFrom()) becomes the root of its own trace.
- //
- // Examples:
- //
- // var tracer opentracing.Tracer = ...
- //
- // // The root-span case:
- // sp := tracer.StartSpan("GetFeed")
- //
- // // The vanilla child span case:
- // sp := tracer.StartSpan(
- // "GetFeed",
- // opentracing.ChildOf(parentSpan.Context()))
- //
- // // All the bells and whistles:
- // sp := tracer.StartSpan(
- // "GetFeed",
- // opentracing.ChildOf(parentSpan.Context()),
- // opentracing.Tag{"user_agent", loggedReq.UserAgent},
- // opentracing.StartTime(loggedReq.Timestamp),
- // )
- //
- StartSpan(operationName string, opts ...StartSpanOption) Span
-
- // Inject() takes the `sm` SpanContext instance and injects it for
- // propagation within `carrier`. The actual type of `carrier` depends on
- // the value of `format`.
- //
- // OpenTracing defines a common set of `format` values (see BuiltinFormat),
- // and each has an expected carrier type.
- //
- // Other packages may declare their own `format` values, much like the keys
- // used by `context.Context` (see https://godoc.org/context#WithValue).
- //
- // Example usage (sans error handling):
- //
- // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
- // err := tracer.Inject(
- // span.Context(),
- // opentracing.HTTPHeaders,
- // carrier)
- //
- // NOTE: All opentracing.Tracer implementations MUST support all
- // BuiltinFormats.
- //
- // Implementations may return opentracing.ErrUnsupportedFormat if `format`
- // is not supported by (or not known by) the implementation.
- //
- // Implementations may return opentracing.ErrInvalidCarrier or any other
- // implementation-specific error if the format is supported but injection
- // fails anyway.
- //
- // See Tracer.Extract().
- Inject(sm SpanContext, format interface{}, carrier interface{}) error
-
- // Extract() returns a SpanContext instance given `format` and `carrier`.
- //
- // OpenTracing defines a common set of `format` values (see BuiltinFormat),
- // and each has an expected carrier type.
- //
- // Other packages may declare their own `format` values, much like the keys
- // used by `context.Context` (see
- // https://godoc.org/golang.org/x/net/context#WithValue).
- //
- // Example usage (with StartSpan):
- //
- //
- // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
- // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
- //
- // // ... assuming the ultimate goal here is to resume the trace with a
- // // server-side Span:
- // var serverSpan opentracing.Span
- // if err == nil {
- // span = tracer.StartSpan(
- // rpcMethodName, ext.RPCServerOption(clientContext))
- // } else {
- // span = tracer.StartSpan(rpcMethodName)
- // }
- //
- //
- // NOTE: All opentracing.Tracer implementations MUST support all
- // BuiltinFormats.
- //
- // Return values:
- // - A successful Extract returns a SpanContext instance and a nil error
- // - If there was simply no SpanContext to extract in `carrier`, Extract()
- // returns (nil, opentracing.ErrSpanContextNotFound)
- // - If `format` is unsupported or unrecognized, Extract() returns (nil,
- // opentracing.ErrUnsupportedFormat)
- // - If there are more fundamental problems with the `carrier` object,
- // Extract() may return opentracing.ErrInvalidCarrier,
- // opentracing.ErrSpanContextCorrupted, or implementation-specific
- // errors.
- //
- // See Tracer.Inject().
- Extract(format interface{}, carrier interface{}) (SpanContext, error)
-}
-
-// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
-// mechanism to override the start timestamp, specify Span References, and make
-// a single Tag or multiple Tags available at Span start time.
-//
-// StartSpan() callers should look at the StartSpanOption interface and
-// implementations available in this package.
-//
-// Tracer implementations can convert a slice of `StartSpanOption` instances
-// into a `StartSpanOptions` struct like so:
-//
-// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
-// sso := opentracing.StartSpanOptions{}
-// for _, o := range opts {
-// o.Apply(&sso)
-// }
-// ...
-// }
-//
-type StartSpanOptions struct {
- // Zero or more causal references to other Spans (via their SpanContext).
- // If empty, start a "root" Span (i.e., start a new trace).
- References []SpanReference
-
- // StartTime overrides the Span's start time, or implicitly becomes
- // time.Now() if StartTime.IsZero().
- StartTime time.Time
-
- // Tags may have zero or more entries; the restrictions on map values are
- // identical to those for Span.SetTag(). May be nil.
- //
- // If specified, the caller hands off ownership of Tags at
- // StartSpan() invocation time.
- Tags map[string]interface{}
-}
-
-// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
-//
-// StartSpanOption borrows from the "functional options" pattern, per
-// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
-type StartSpanOption interface {
- Apply(*StartSpanOptions)
-}
-
-// SpanReferenceType is an enum type describing different categories of
-// relationships between two Spans. If Span-2 refers to Span-1, the
-// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
-// ChildOfRef means that Span-1 created Span-2.
-//
-// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
-// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
-// or Span-2 may be sitting in a distributed queue behind Span-1.
-type SpanReferenceType int
-
-const (
- // ChildOfRef refers to a parent Span that caused *and* somehow depends
- // upon the new child Span. Often (but not always), the parent Span cannot
- // finish until the child Span does.
- //
- // A timing diagram for a ChildOfRef that's blocked on the new Span:

- //
- // [-Parent Span---------]
- // [-Child Span----]
- //
- // See http://opentracing.io/spec/
- //
- // See opentracing.ChildOf()
- ChildOfRef SpanReferenceType = iota
-
- // FollowsFromRef refers to a parent Span that does not depend in any way
- // on the result of the new child Span. For instance, one might use
- // FollowsFromRefs to describe pipeline stages separated by queues,
- // or a fire-and-forget cache insert at the tail end of a web request.
- //
- // A FollowsFromRef Span is part of the same logical trace as the new Span:
- // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
- //
- // All of the following could be valid timing diagrams for children that
- // "FollowFrom" a parent.
- //
- // [-Parent Span-] [-Child Span-]
- //
- //
- // [-Parent Span--]
- // [-Child Span-]
- //
- //
- // [-Parent Span-]
- // [-Child Span-]
- //
- // See http://opentracing.io/spec/
- //
- // See opentracing.FollowsFrom()
- FollowsFromRef
-)
-
-// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
-// referenced SpanContext. See the SpanReferenceType documentation for
-// supported relationships. If SpanReference is created with
-// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
-// syntax for starting spans:
-//
-// sc, _ := tracer.Extract(someFormat, someCarrier)
-// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
-//
-// The `ChildOf(sc)` option above will not panic if sc == nil; it will just
-// not add the parent span reference to the options.
-type SpanReference struct {
- Type SpanReferenceType
- ReferencedContext SpanContext
-}
-
-// Apply satisfies the StartSpanOption interface.
-func (r SpanReference) Apply(o *StartSpanOptions) {
- if r.ReferencedContext != nil {
- o.References = append(o.References, r)
- }
-}
-
-// ChildOf returns a StartSpanOption pointing to a dependent parent span.
-// If sc == nil, the option has no effect.
-//
-// See ChildOfRef, SpanReference
-func ChildOf(sc SpanContext) SpanReference {
- return SpanReference{
- Type: ChildOfRef,
- ReferencedContext: sc,
- }
-}
-
-// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
-// the child Span but does not directly depend on its result in any way.
-// If sc == nil, the option has no effect.
-//
-// See FollowsFromRef, SpanReference
-func FollowsFrom(sc SpanContext) SpanReference {
- return SpanReference{
- Type: FollowsFromRef,
- ReferencedContext: sc,
- }
-}
-
-// StartTime is a StartSpanOption that sets an explicit start timestamp for the
-// new Span.
-type StartTime time.Time
-
-// Apply satisfies the StartSpanOption interface.
-func (t StartTime) Apply(o *StartSpanOptions) {
- o.StartTime = time.Time(t)
-}
-
-// Tags are a generic map from an arbitrary string key to an opaque value type.
-// The underlying tracing system is responsible for interpreting and
-// serializing the values.
-type Tags map[string]interface{}
-
-// Apply satisfies the StartSpanOption interface.
-func (t Tags) Apply(o *StartSpanOptions) {
- if o.Tags == nil {
- o.Tags = make(map[string]interface{})
- }
- for k, v := range t {
- o.Tags[k] = v
- }
-}
-
-// Tag may be passed as a StartSpanOption to add a tag to new spans,
-// or its Set method may be used to apply the tag to an existing Span,
-// for example:
-//
-// tracer.StartSpan("opName", Tag{"Key", value})
-//
-// or
-//
-// Tag{"key", value}.Set(span)
-type Tag struct {
- Key string
- Value interface{}
-}
-
-// Apply satisfies the StartSpanOption interface.
-func (t Tag) Apply(o *StartSpanOptions) {
- if o.Tags == nil {
- o.Tags = make(map[string]interface{})
- }
- o.Tags[t.Key] = t.Value
-}
-
-// Set applies the tag to an existing Span.
-func (t Tag) Set(s Span) {
- s.SetTag(t.Key, t.Value)
-}
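The functional-options pattern documented in the deleted tracer.go composes as in the sketch below: ChildOf, Tag, and StartTime all implement StartSpanOption and fold into a StartSpanOptions struct inside the tracer implementation. Names and tag values here are placeholders.

package example

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

// startChildSpan starts a child of `parent` with an explicit start time and a tag.
func startChildSpan(tracer opentracing.Tracer, parent opentracing.SpanContext, start time.Time) opentracing.Span {
	return tracer.StartSpan(
		"GetFeed",
		opentracing.ChildOf(parent), // no-op if parent == nil
		opentracing.Tag{Key: "user_agent", Value: "example/1.0"},
		opentracing.StartTime(start),
	)
}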
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go
new file mode 100644
index 000000000..b6779bf70
--- /dev/null
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go
@@ -0,0 +1,36 @@
+package api
+
+import "net"
+
+const (
+ // Version of the REST API, not implementation version.
+ // See openapi.yaml for the definition.
+ Version = "1.1.0"
+)
+
+// ErrorJSON is returned with "application/json" content type and non-2XX status code
+type ErrorJSON struct {
+ Message string `json:"message"`
+}
+
+// Info is the structure returned by `GET /info`
+type Info struct {
+ APIVersion string `json:"apiVersion"` // REST API version
+ Version string `json:"version"` // Implementation version
+ StateDir string `json:"stateDir"`
+ ChildPID int `json:"childPID"`
+ NetworkDriver *NetworkDriverInfo `json:"networkDriver,omitempty"`
+ PortDriver *PortDriverInfo `json:"portDriver,omitempty"`
+}
+
+// NetworkDriverInfo in Info
+type NetworkDriverInfo struct {
+ Driver string `json:"driver"`
+ DNS []net.IP `json:"dns,omitempty"`
+}
+
+// PortDriverInfo in Info
+type PortDriverInfo struct {
+ Driver string `json:"driver"`
+ Protos []string `json:"protos"`
+}
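A hypothetical client for the `GET /info` endpoint returning the Info struct added above might look like the following sketch. The socketPath argument is an assumption (RootlessKit serves the REST API on a UNIX socket under its state dir), and per the openapi.yaml below, the host part of the URL is ignored.

package example

import (
	"context"
	"encoding/json"
	"net"
	"net/http"

	"github.com/rootless-containers/rootlesskit/pkg/api"
)

// fetchInfo queries GET /info over the RootlessKit API socket and decodes the result.
func fetchInfo(ctx context.Context, socketPath string) (*api.Info, error) {
	client := &http.Client{
		Transport: &http.Transport{
			// Dial the UNIX socket regardless of the URL's host part.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://rootlesskit/v1/info", nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var info api.Info
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		return nil, err
	}
	return &info, nil
}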
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml
new file mode 100644
index 000000000..6a6550c33
--- /dev/null
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml
@@ -0,0 +1,161 @@
+# When you make a change to this YAML, please validate it with https://editor.swagger.io
+openapi: 3.0.3
+info:
+ version: 1.1.0
+ title: RootlessKit API
+servers:
+ - url: 'http://rootlesskit/v1'
+ description: Local UNIX socket server. The host part of the URL is ignored.
+paths:
+# /info: API >= 1.1.0
+ /info:
+ get:
+ responses:
+ '200':
+ description: Info. Available since API 1.1.0.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Info'
+ /ports:
+ get:
+ responses:
+ '200':
+ description: An array of PortStatus
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PortStatuses'
+ post:
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PortSpec'
+ responses:
+ '201':
+ description: PortStatus with ID
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PortStatus'
+ '/ports/{id}':
+ delete:
+ parameters:
+ - name: id
+ in: path
+ required: true
+ schema:
+ type: integer
+ format: int64
+ responses:
+ '200':
+ description: Null response
+components:
+ schemas:
+ Proto:
+ type: string
+ description: "protocol for listening. Corresponds to Go's net.Listen. The strings with \"4\" and \"6\" suffixes were introduced in API 1.1.0."
+ enum:
+ - tcp
+ - tcp4
+ - tcp6
+ - udp
+ - udp4
+ - udp6
+ - sctp
+ - sctp4
+ - sctp6
+ PortSpec:
+ required:
+ - proto
+ properties:
+ proto:
+ $ref: '#/components/schemas/Proto'
+ parentIP:
+ type: string
+ parentPort:
+ type: integer
+ format: int32
+ minimum: 1
+ maximum: 65535
+ childIP:
+ type: string
+# future version may support requests with parentPort<=0 for automatic port assignment
+ childPort:
+ type: integer
+ format: int32
+ minimum: 1
+ maximum: 65535
+ PortStatus:
+ required:
+ - id
+ properties:
+ id:
+ type: integer
+ format: int64
+ spec:
+ $ref: '#/components/schemas/PortSpec'
+ PortStatuses:
+ type: array
+ items:
+ $ref: '#/components/schemas/PortStatus'
+# Info: API >= 1.1.0
+ Info:
+ required:
+ - apiVersion
+ - version
+ - stateDir
+ - childPID
+ properties:
+ apiVersion:
+ type: string
+ description: "API version, without \"v\" prefix"
+ example: "1.1.0"
+ version:
+ type: string
+ description: "Implementation version, without \"v\" prefix"
+ example: "0.42.0-beta.1+dev"
+ stateDir:
+ type: string
+ description: "state dir"
+ example: "/run/user/1000/rootlesskit"
+ childPID:
+ type: integer
+ description: "child PID"
+ example: 10042
+ networkDriver:
+ $ref: '#/components/schemas/NetworkDriverInfo'
+ portDriver:
+ $ref: '#/components/schemas/PortDriverInfo'
+ NetworkDriverInfo:
+ required:
+ - driver
+ properties:
+ driver:
+ type: string
+ description: "network driver. Empty when --net=host."
+ example: "slirp4netns"
+# TODO: return TAP info
+ dns:
+ type: array
+ description: "DNS addresses"
+ items:
+ type: string
+ example: ["10.0.2.3"]
+ PortDriverInfo:
+ required:
+ - driver
+        - protos
+ properties:
+ driver:
+ type: string
+ description: "port driver"
+ example: "builtin"
+ protos:
+ type: array
+ description: "The supported protocol strings for listening ports"
+ example: ["tcp","udp"]
+ items:
+ $ref: '#/components/schemas/Proto'
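Exposing a port via this schema means POSTing a PortSpec and getting back a PortStatus whose id can later be passed to `DELETE /ports/{id}`. A hypothetical sketch, reusing a client already dialed against the API socket as in the /info example, could be:

package example

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"

	"github.com/rootless-containers/rootlesskit/pkg/port"
)

// addPort posts a PortSpec to /ports and decodes the resulting PortStatus.
func addPort(ctx context.Context, httpClient *http.Client, spec port.Spec) (*port.Status, error) {
	body, err := json.Marshal(spec)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://rootlesskit/v1/ports", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var status port.Status
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		return nil, err
	}
	return &status, nil
}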
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go
index fc249c2d9..05dc0303c 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go
@@ -1,10 +1,11 @@
package child
import (
- "fmt"
"io"
"net"
"os"
+ "strconv"
+ "strings"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
@@ -101,10 +102,16 @@ func (d *childDriver) handleConnectInit(c *net.UnixConn, req *msg.Request) error
func (d *childDriver) handleConnectRequest(c *net.UnixConn, req *msg.Request) error {
switch req.Proto {
case "tcp":
+ case "tcp4":
+ case "tcp6":
case "udp":
+ case "udp4":
+ case "udp6":
default:
return errors.Errorf("unknown proto: %q", req.Proto)
}
+ // dialProto does not need "4", "6" suffix
+ dialProto := strings.TrimSuffix(strings.TrimSuffix(req.Proto, "6"), "4")
var dialer net.Dialer
ip := req.IP
if ip == "" {
@@ -114,13 +121,9 @@ func (d *childDriver) handleConnectRequest(c *net.UnixConn, req *msg.Request) er
if p == nil {
return errors.Errorf("invalid IP: %q", ip)
}
- p = p.To4()
- if p == nil {
- return errors.Errorf("unsupported IP (v6?): %s", ip)
- }
ip = p.String()
}
- targetConn, err := dialer.Dial(req.Proto, fmt.Sprintf("%s:%d", ip, req.Port))
+ targetConn, err := dialer.Dial(dialProto, net.JoinHostPort(ip, strconv.Itoa(req.Port)))
if err != nil {
return err
}
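The suffix-stripping added above is the whole trick for the child side: the "4"/"6" qualifier matters for the parent's listener, while the child dials a concrete target address and can use the plain protocol name. A minimal sketch of the mapping (helper name is illustrative):

package example

import "strings"

// dialProto drops the address-family suffix before dialing,
// e.g. "tcp6" -> "tcp", "udp4" -> "udp", "tcp" -> "tcp".
func dialProto(reqProto string) string {
	return strings.TrimSuffix(strings.TrimSuffix(reqProto, "6"), "4")
}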
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go
index a8c8e0385..a60d99bd9 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go
@@ -19,7 +19,7 @@ const (
// Request and Response are encoded as JSON with uint32le length header.
type Request struct {
Type string // "init" or "connect"
- Proto string // "tcp" or "udp"
+ Proto string // "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"
IP string
Port int
}
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
index f6e5e56ed..e7ce641e1 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
@@ -15,6 +15,7 @@ import (
"github.com/pkg/errors"
+ "github.com/rootless-containers/rootlesskit/pkg/api"
"github.com/rootless-containers/rootlesskit/pkg/port"
"github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg"
"github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque"
@@ -56,6 +57,14 @@ type driver struct {
nextID int
}
+func (d *driver) Info(ctx context.Context) (*api.PortDriverInfo, error) {
+ info := &api.PortDriverInfo{
+ Driver: "builtin",
+ Protos: []string{"tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"},
+ }
+ return info, nil
+}
+
func (d *driver) OpaqueForChild() map[string]string {
return map[string]string{
opaque.SocketPath: d.socketPath,
@@ -134,9 +143,9 @@ func (d *driver) AddPort(ctx context.Context, spec port.Spec) (*port.Status, err
return nil // FIXME
}
switch spec.Proto {
- case "tcp":
+ case "tcp", "tcp4", "tcp6":
err = tcp.Run(d.socketPath, spec, routineStopCh, d.logWriter)
- case "udp":
+ case "udp", "udp4", "udp6":
err = udp.Run(d.socketPath, spec, routineStopCh, d.logWriter)
default:
// NOTREACHED
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
index 9fb801162..7a7a167f1 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
@@ -13,7 +13,7 @@ import (
)
func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
- ln, err := net.Listen("tcp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
+ ln, err := net.Listen(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
if err != nil {
fmt.Fprintf(logWriter, "listen: %v\n", err)
return err
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
index fbff2b081..0080dd22c 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
@@ -14,11 +14,11 @@ import (
)
func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
- addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
+ addr, err := net.ResolveUDPAddr(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
if err != nil {
return err
}
- c, err := net.ListenUDP("udp", addr)
+ c, err := net.ListenUDP(spec.Proto, addr)
if err != nil {
return err
}
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
index 41ec33487..c95bfc7c7 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
@@ -3,17 +3,20 @@ package port
import (
"context"
"net"
+
+ "github.com/rootless-containers/rootlesskit/pkg/api"
)
type Spec struct {
- Proto string `json:"proto,omitempty"` // either "tcp" or "udp". in future "sctp" will be supported as well.
- ParentIP string `json:"parentIP,omitempty"` // IPv4 address. can be empty (0.0.0.0).
+ // Proto is one of ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"].
+ // "tcp" may cause listening on both IPv4 and IPv6. (Corresponds to Go's net.Listen .)
+ Proto string `json:"proto,omitempty"`
+ ParentIP string `json:"parentIP,omitempty"` // IPv4 or IPv6 address. can be empty (0.0.0.0).
ParentPort int `json:"parentPort,omitempty"`
ChildPort int `json:"childPort,omitempty"`
- // ChildIP is an IPv4 address.
+ // ChildIP is an IPv4 or IPv6 address.
// Default values:
// - builtin driver: 127.0.0.1
- // - socat driver: 127.0.0.1
// - slirp4netns driver: slirp4netns's child IP, e.g., 10.0.2.100
ChildIP string `json:"childIP,omitempty"`
}
@@ -41,6 +44,7 @@ type ChildContext struct {
// ParentDriver is a driver for the parent process.
type ParentDriver interface {
Manager
+ Info(ctx context.Context) (*api.PortDriverInfo, error)
// OpaqueForChild typically consists of socket path
// for controlling child from parent
OpaqueForChild() map[string]string
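The Proto semantics documented on Spec above follow Go's net.Listen: with an unspecified host, "tcp" typically yields a dual-stack (IPv4 and IPv6) listener, while "tcp4"/"tcp6" pin the address family. A small illustrative sketch, with behavior ultimately depending on the host's network configuration:

package example

import "net"

// listenExample contrasts a dual-stack listener with an IPv4-only one.
func listenExample() error {
	dual, err := net.Listen("tcp", ":0") // usually binds IPv4 and IPv6 (platform-dependent)
	if err != nil {
		return err
	}
	defer dual.Close()

	v4only, err := net.Listen("tcp4", "127.0.0.1:0") // IPv4 only
	if err != nil {
		return err
	}
	defer v4only.Close()
	return nil
}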
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
index a885a76ca..937932642 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
@@ -152,7 +152,10 @@ func ValidatePortSpec(spec port.Spec, existingPorts map[int]*port.Status) error
func validateProto(proto string) error {
switch proto {
- case "tcp", "udp", "sctp":
+ case
+ "tcp", "tcp4", "tcp6",
+ "udp", "udp4", "udp6",
+ "sctp", "sctp4", "sctp6":
return nil
default:
return errors.Errorf("unknown proto: %q", proto)
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
deleted file mode 100644
index 273490790..000000000
--- a/vendor/github.com/uber/jaeger-client-go/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-*.out
-*.test
-*.xml
-*.swp
-.idea/
-.tmp/
-*.iml
-*.cov
-*.html
-*.log
-gen/thrift/js
-gen/thrift/py
-vendor/
-crossdock-main
-crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
deleted file mode 100644
index 295ebcf62..000000000
--- a/vendor/github.com/uber/jaeger-client-go/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "idl"]
- path = idl
- url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
deleted file mode 100644
index f9c9a7776..000000000
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-sudo: required
-
-language: go
-go_import_path: github.com/uber/jaeger-client-go
-
-dist: trusty
-
-matrix:
- include:
- - go: 1.14.x
- env:
- - TESTS=true
- - USE_DEP=true
- - COVERAGE=true
- - go: 1.14.x
- env:
- - USE_DEP=true
- - CROSSDOCK=true
- - go: 1.14.x
- env:
- - TESTS=true
- - USE_DEP=false
- - USE_GLIDE=true
- # test with previous version of Go
- - go: 1.13.x
- env:
- - TESTS=true
- - USE_DEP=true
- - CI_SKIP_LINT=true
-
-services:
- - docker
-
-env:
- global:
- - DOCKER_COMPOSE_VERSION=1.8.0
- - COMMIT=${TRAVIS_COMMIT::8}
- # DOCKER_PASS
- - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
- # DOCKER_USER
- - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
-
-install:
- - make install-ci USE_DEP=$USE_DEP
- - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
-
-script:
- - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
- - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
-
-after_success:
- - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
- - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
-
-after_failure:
- - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
deleted file mode 100644
index cab87e9d6..000000000
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ /dev/null
@@ -1,351 +0,0 @@
-Changes by Version
-==================
-
-2.25.0 (2020-07-13)
--------------------
-## Breaking changes
-- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
-
- The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
- The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
- or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
-
-## Bug fixes
-- Do not add invalid context to references (#521) -- Yuri Shkuro
-
-
-2.24.0 (2020-06-14)
--------------------
-- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
-- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
-- Override reporter config only when agent host/port is set in env (#513) -- ilylia
-- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
-
-
-2.23.1 (2020-04-28)
--------------------
-- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
-
-
-2.23.0 (2020-04-22)
--------------------
-
-- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
-- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
-- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
-
-2.22.1 (2020-01-16)
--------------------
-
-- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
-
-
-2.22.0 (2020-01-15)
--------------------
-
-- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
-- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
-
-
-2.21.1 (2019-12-20)
--------------------
-
-- Update version correctly.
-
-
-2.21.0 (2019-12-20)
--------------------
-
-- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
-- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
-- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
-- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
-
-
-2.20.1 (2019-11-08)
--------------------
-
-Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
-
-- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
-- Create `OperationNameLateBinding` sampler option and config option
-- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
-
-
-2.20.0 (2019-11-06)
--------------------
-
-## New Features
-
-- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
-
- Sampling state is shared between all spans of the trace that are still in memory.
- This allows implementation of delayed sampling decisions (see below).
-
-- Support delayed sampling decisions (#449) -- Yuri Shkuro
-
- This is a large structural change to how the samplers work.
- It allows some samplers to be executed multiple times on different
- span events (like setting a tag) and make a positive sampling decision
- later in the span life cycle, or even based on children spans.
- See [README](./README.md#delayed-sampling) for more details.
-
- There is a related minor change in behavior of the adaptive (per-operation) sampler,
- which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
- operation used to make the sampling decision is always the one provided at span creation.
-
-- Add experimental tag matching sampler (#452) -- Yuri Shkuro
-
- A sampler that can sample a trace based on a certain tag added to the root
- span or one of its local (in-process) children. The sampler can be used with
- another experimental `PrioritySampler` that allows multiple samplers to try
- to make a sampling decision, in a certain priority order.
-
-- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
-- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
-
-## Minor patches
-
-- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
-- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
-- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
-- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
-
-2.19.0 (2019-09-23)
--------------------
-
-- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
-
-
-2.18.1 (2019-09-16)
--------------------
-
-- Remove go.mod / go.sum that interfere with `go get` (#432)
-
-
-2.18.0 (2019-09-09)
--------------------
-
-- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) <Jun Guo>
-
-
-2.17.0 (2019-08-30)
--------------------
-
-- Add a flag for firehose mode (#419) <Prithvi Raj>
-- Default sampling server URL to agent (#414) <Bryan Boreham>
-- Update default sampling rate when sampling strategy is refreshed (#413) <Bryan Boreham>
-- Support "Self" Span Reference (#411) <dm03514>
-- Don't complain about blank service name if tracing is Disabled (#410) <Yuri Shkuro>
-- Use IP address from tag if exist (#402) <NikoKVCS>
-- Expose span data to custom reporters [fixes #394] (#399) <Curtis Allen>
-- Fix the span allocation in the pool (#381) <Dmitry Ponomarev>
-
-
-2.16.0 (2019-03-24)
--------------------
-
-- Add baggage to B3 codec (#319) <Pavol Loffay>
-- Add support for 128bit trace ids to zipkin thrift spans. (#378) <Douglas Reid>
-- Update zipkin propagation logic to support 128bit traceIDs (#373) <Douglas Reid>
-- Accept "true" for the x-b3-sampled header (#356) <Adrian Bogatu>
-
-- Allow setting of PoolSpans from Config object (#322) <Matthew Pound>
-- Make propagators public to allow wrapping (#379) <Ivan Babrou>
-- Change default metric namespace to use relevant separator for the metric backend (#364) <Gary Brown>
-- Change metrics prefix to jaeger_tracer and add descriptions (#346) <Gary Brown>
-- Bump OpenTracing to ^1.1.x (#383) <Yuri Shkuro>
-- Upgrade jaeger-lib to v2.0.0 (#359) <Gary Brown>
-- Avoid defer when generating random number (#358) <Gary Brown>
-- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) <Gary Brown>
-- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) <Eundoo Song>
-
-
-2.15.0 (2018-10-10)
--------------------
-
-- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) <Zvi Cahana>
-- Make maximum annotation length configurable in tracer options (#318) <Eric Chang>
-- Support more environment variables in configuration (#323) <Daneyon Hansen>
-- Print error on Sampler Query failure (#328) <Goutham Veeramachaneni>
-- Add an HTTPOption to support custom http.RoundTripper (#333) <Michael Puncel>
-- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) <Michael Puncel>
-
-
-2.14.0 (2018-04-30)
--------------------
-
-- Support throttling for debug traces (#274) <Isaac Hier>
-- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
-- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
-- Test with Go 1.9 (#298) <Yuri Shkuro>
-
-
-2.13.0 (2018-04-15)
--------------------
-
-- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
-- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
-- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
-- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
-- Allow overriding sampler in the Config (#270) <Mike Kabischev>
-
-
-2.12.0 (2018-03-14)
--------------------
-
-- Use lock when retrieving span.Context() (#268)
-- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
-
-
-2.11.2 (2018-01-12)
--------------------
-
-- Add Gopkg.toml to allow using the lib with `dep`
-
-
-2.11.1 (2018-01-03)
--------------------
-
-- Do not enqueue spans after Reporter is closed (#235, #245)
-- Change default flush interval to 1sec (#243)
-
-
-2.11.0 (2017-11-27)
--------------------
-
-- Normalize metric names and tags to be compatible with Prometheus (#222)
-
-
-2.10.0 (2017-11-14)
--------------------
-
-- Support custom tracing headers (#176)
-- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
-- Do not coerce baggage keys to lower case (#196)
-- Log span name when span cannot be reported (#198)
-- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
-
-
-2.9.0 (2017-07-29)
-------------------
-
-- Pin thrift <= 0.10 (#179)
-- Introduce a parallel interface ContribObserver (#159)
-
-
-2.8.0 (2017-07-05)
-------------------
-
-- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
-- Add options to set tracer tags
-
-
-2.7.0 (2017-06-21)
-------------------
-
-- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
-- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
-- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
-- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
-- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
-
-
-2.6.0 (2017-03-28)
-------------------
-
-- Add config option to initialize RPC Metrics feature
-
-
-2.5.0 (2017-03-23)
-------------------
-
-- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
-- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
-- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
-
-
-2.4.0 (2017-03-21)
-------------------
-
-- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
-- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
-
-
-2.3.0 (2017-03-20)
-------------------
-
-- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
-- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
-
-
-2.2.1 (2017-03-14)
-------------------
-
-- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
-
-
-2.2.0 (2017-03-10)
-------------------
-
-- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
-- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
-
-
-2.1.2 (2017-02-27)
--------------------
-
-- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
-- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
-- Add tracer initialization godoc examples
-
-
-2.1.1 (2017-02-21)
--------------------
-
-- Fix inefficient usage of zap.Logger
-
-
-2.1.0 (2017-02-17)
--------------------
-
-- Add adapter for zap.Logger (https://github.com/uber-go/zap)
-- Move logging API to ./log/ package
-
-
-2.0.0 (2017-02-08)
--------------------
-
-- Support Adaptive Sampling
-- Support 128bit Trace IDs
-- Change trace/span IDs from uint64 to strong types TraceID and SpanID
-- Add Zipkin HTTP B3 Propagation format support #72
-- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
-- Change API for tracer, reporter, sampler initialization
-
-
-1.6.0 (2016-10-14)
--------------------
-
-- Add Zipkin HTTP transport
-- Support external baggage via jaeger-baggage header
-- Unpin Thrift version, keep to master
-
-
-1.5.1 (2016-09-27)
--------------------
-
-- Relax dependency on opentracing to ^1
-
-
-1.5.0 (2016-09-27)
--------------------
-
-- Upgrade to opentracing-go 1.0
-- Support KV logging for Spans
-
-
-1.4.0 (2016-09-14)
--------------------
-
-- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
deleted file mode 100644
index 41e2154cf..000000000
--- a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,170 +0,0 @@
-# How to Contribute to Jaeger
-
-We'd love your help!
-
-Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
-pull requests. This document outlines some of the conventions on development
-workflow, commit message formatting, contact points and other resources to make
-it easier to get your contribution accepted.
-
-We gratefully welcome improvements to documentation as well as to code.
-
-# Certificate of Origin
-
-By contributing to this project you agree to the [Developer Certificate of
-Origin](https://developercertificate.org/) (DCO). This document was created
-by the Linux Kernel community and is a simple statement that you, as a
-contributor, have the legal right to make the contribution. See the [DCO](DCO)
-file for details.
-
-## Getting Started
-
-This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
-
-To get started, make sure you clone the Git repository into the correct location
-`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
-
-```
-mkdir -p $GOPATH/src/github.com/uber
-cd $GOPATH/src/github.com/uber
-git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
-cd jaeger-client-go
-git submodule update --init --recursive
-```
-
-Then install dependencies and run the tests:
-
-```
-make install
-make test
-```
-
-## Imports grouping
-
-This project follows the following pattern for grouping imports in Go files:
- * imports from standard library
- * imports from other projects
- * imports from `jaeger-client-go` project
-
-For example:
-
-```go
-import (
- "fmt"
-
- "github.com/uber/jaeger-lib/metrics"
- "go.uber.org/zap"
-
- "github.com/uber/jaeger-client-go/config"
-)
-```
-
-## Making A Change
-
-*Before making any significant changes, please [open an
-issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
-changes ahead of time will make the contribution process smooth for everyone.
-
-Once we've discussed your changes and you've got your code ready, make sure
-that tests are passing (`make test` or `make cover`) and open your PR. Your
-pull request is most likely to be accepted if it:
-
-* Includes tests for new functionality.
-* Follows the guidelines in [Effective
- Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
- review comments](https://github.com/golang/go/wiki/CodeReviewComments).
-* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
- * Separate subject from body with a blank line
- * Limit the subject line to 50 characters
- * Capitalize the subject line
- * Do not end the subject line with a period
- * Use the imperative mood in the subject line
- * Wrap the body at 72 characters
- * Use the body to explain _what_ and _why_ instead of _how_
-* Each commit must be signed by the author ([see below](#sign-your-work)).
-
-## License
-
-By contributing your code, you agree to license your contribution under the terms
-of the [Apache License](LICENSE).
-
-If you are adding a new file it should have a header like below. The easiest
-way to add such header is to run `make fmt`.
-
-```
-// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-```
-
-## Sign your work
-
-The sign-off is a simple line at the end of the explanation for the
-patch, which certifies that you wrote it or otherwise have the right to
-pass it on as an open-source patch. The rules are pretty simple: if you
-can certify the below (from
-[developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith <joe@gmail.com>
-
-using your real name (sorry, no pseudonyms or anonymous contributions.)
-
-You can add the sign off when creating the git commit via `git commit -s`.
-
-If you want this to be automatic you can set up some aliases:
-
-```
-git config --add alias.amend "commit -s --amend"
-git config --add alias.c "commit -s"
-```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
deleted file mode 100644
index 068953d4b..000000000
--- a/vendor/github.com/uber/jaeger-client-go/DCO
+++ /dev/null
@@ -1,37 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
deleted file mode 100644
index 387958b12..000000000
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
+++ /dev/null
@@ -1,401 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
- name = "github.com/BurntSushi/toml"
- packages = ["."]
- pruneopts = "UT"
- revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
- version = "v0.3.1"
-
-[[projects]]
- digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "UT"
- revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
- version = "v1.0.1"
-
-[[projects]]
- branch = "master"
- digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
- name = "github.com/codahale/hdrhistogram"
- packages = ["."]
- pruneopts = "UT"
- revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
-
-[[projects]]
- branch = "master"
- digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
- name = "github.com/crossdock/crossdock-go"
- packages = [
- ".",
- "assert",
- "require",
- ]
- pruneopts = "UT"
- revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
-
-[[projects]]
- digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "UT"
- revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
- name = "github.com/golang/mock"
- packages = ["gomock"]
- pruneopts = "UT"
- revision = "3a35fb6e3e18b9dbfee291262260dee7372d2a92"
- version = "v1.4.3"
-
-[[projects]]
- digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
- name = "github.com/golang/protobuf"
- packages = ["proto"]
- pruneopts = "UT"
- revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
- version = "v1.3.2"
-
-[[projects]]
- digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "UT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:727b8f567a30d0739d6c26b9472b3422b351c93cf62095164c845a54b16fc18e"
- name = "github.com/opentracing/opentracing-go"
- packages = [
- ".",
- "ext",
- "harness",
- "log",
- ]
- pruneopts = "UT"
- revision = "659c90643e714681897ec2521c60567dd21da733"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = "UT"
- revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
- version = "v0.8.1"
-
-[[projects]]
- digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
- name = "github.com/pmezard/go-difflib"
- packages = ["difflib"]
- pruneopts = "UT"
- revision = "792786c7400a136282c1664665ae0a8db921c6c2"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- ]
- pruneopts = "UT"
- revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "UT"
- revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016"
-
-[[projects]]
- digest = "1:f119e3205d3a1f0f19dbd7038eb37528e2c6f0933269dc344e305951fb87d632"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "UT"
- revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
- version = "v0.7.0"
-
-[[projects]]
- digest = "1:a210815b437763623ecca8eb91e6a0bf4f2d6773c5a6c9aec0e28f19e5fd6deb"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/fs",
- "internal/util",
- ]
- pruneopts = "UT"
- revision = "499c85531f756d1129edd26485a5f73871eeb308"
- version = "v0.0.5"
-
-[[projects]]
- digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
- name = "github.com/stretchr/objx"
- packages = ["."]
- pruneopts = "UT"
- revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
- version = "v0.1.1"
-
-[[projects]]
- digest = "1:d88ba57c4e8f5db6ce9ab6605a89f4542ee751b576884ba5271c2ba3d4b6f2d2"
- name = "github.com/stretchr/testify"
- packages = [
- "assert",
- "mock",
- "require",
- "suite",
- ]
- pruneopts = "UT"
- revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
- version = "v1.4.0"
-
-[[projects]]
- digest = "1:5b98956718573850caf7e0fd00b571a6657c4ef1f345ddf0c96b43ce355fe862"
- name = "github.com/uber/jaeger-client-go"
- packages = [
- ".",
- "config",
- "crossdock/client",
- "crossdock/common",
- "crossdock/endtoend",
- "crossdock/log",
- "crossdock/server",
- "crossdock/thrift/tracetest",
- "internal/baggage",
- "internal/baggage/remote",
- "internal/reporterstats",
- "internal/spanlog",
- "internal/throttler",
- "internal/throttler/remote",
- "log",
- "log/zap/mock_opentracing",
- "rpcmetrics",
- "testutils",
- "thrift",
- "thrift-gen/agent",
- "thrift-gen/baggage",
- "thrift-gen/jaeger",
- "thrift-gen/sampling",
- "thrift-gen/zipkincore",
- "transport",
- "transport/zipkin",
- "utils",
- ]
- pruneopts = "UT"
- revision = "66c008c3d6ad856cac92a0af53186efbffa8e6a5"
- version = "v2.24.0"
-
-[[projects]]
- digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
- name = "github.com/uber/jaeger-lib"
- packages = [
- "metrics",
- "metrics/metricstest",
- "metrics/prometheus",
- ]
- pruneopts = "UT"
- revision = "a87ae9d84fb038a8d79266298970720be7c80fcd"
- version = "v2.2.0"
-
-[[projects]]
- digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
- name = "go.uber.org/atomic"
- packages = ["."]
- pruneopts = "UT"
- revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
- version = "v1.5.0"
-
-[[projects]]
- digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
- name = "go.uber.org/multierr"
- packages = ["."]
- pruneopts = "UT"
- revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
- version = "v1.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
- name = "go.uber.org/tools"
- packages = ["update-license"]
- pruneopts = "UT"
- revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
-
-[[projects]]
- digest = "1:98a70115729234dc73ee7bb83973cb39cb8fedf278d17df77264382bad0183ec"
- name = "go.uber.org/zap"
- packages = [
- ".",
- "buffer",
- "internal/bufferpool",
- "internal/color",
- "internal/exit",
- "zapcore",
- "zaptest/observer",
- ]
- pruneopts = "UT"
- revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
- version = "v1.12.0"
-
-[[projects]]
- branch = "master"
- digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
- name = "golang.org/x/lint"
- packages = [
- ".",
- "golint",
- ]
- pruneopts = "UT"
- revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
-
-[[projects]]
- branch = "master"
- digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- ]
- pruneopts = "UT"
- revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
-
-[[projects]]
- branch = "master"
- digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
- name = "golang.org/x/sys"
- packages = ["windows"]
- pruneopts = "UT"
- revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
-
-[[projects]]
- branch = "master"
- digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
- name = "golang.org/x/tools"
- packages = [
- "go/analysis",
- "go/analysis/passes/inspect",
- "go/ast/astutil",
- "go/ast/inspector",
- "go/buildutil",
- "go/gcexportdata",
- "go/internal/gcimporter",
- "go/internal/packagesdriver",
- "go/packages",
- "go/types/objectpath",
- "go/types/typeutil",
- "internal/fastwalk",
- "internal/gopathwalk",
- "internal/semver",
- "internal/span",
- ]
- pruneopts = "UT"
- revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
-
-[[projects]]
- digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
- version = "v2.2.4"
-
-[[projects]]
- digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
- name = "honnef.co/go/tools"
- packages = [
- "arg",
- "cmd/staticcheck",
- "config",
- "deprecated",
- "facts",
- "functions",
- "go/types/typeutil",
- "internal/cache",
- "internal/passes/buildssa",
- "internal/renameio",
- "internal/sharedcheck",
- "lint",
- "lint/lintdsl",
- "lint/lintutil",
- "lint/lintutil/format",
- "loader",
- "printf",
- "simple",
- "ssa",
- "ssautil",
- "staticcheck",
- "staticcheck/vrp",
- "stylecheck",
- "unused",
- "version",
- ]
- pruneopts = "UT"
- revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
- version = "2019.2.3"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/crossdock/crossdock-go",
- "github.com/golang/mock/gomock",
- "github.com/opentracing/opentracing-go",
- "github.com/opentracing/opentracing-go/ext",
- "github.com/opentracing/opentracing-go/harness",
- "github.com/opentracing/opentracing-go/log",
- "github.com/pkg/errors",
- "github.com/prometheus/client_golang/prometheus",
- "github.com/stretchr/testify/assert",
- "github.com/stretchr/testify/mock",
- "github.com/stretchr/testify/require",
- "github.com/stretchr/testify/suite",
- "github.com/uber/jaeger-client-go",
- "github.com/uber/jaeger-client-go/config",
- "github.com/uber/jaeger-client-go/crossdock/client",
- "github.com/uber/jaeger-client-go/crossdock/common",
- "github.com/uber/jaeger-client-go/crossdock/endtoend",
- "github.com/uber/jaeger-client-go/crossdock/log",
- "github.com/uber/jaeger-client-go/crossdock/server",
- "github.com/uber/jaeger-client-go/crossdock/thrift/tracetest",
- "github.com/uber/jaeger-client-go/internal/baggage",
- "github.com/uber/jaeger-client-go/internal/baggage/remote",
- "github.com/uber/jaeger-client-go/internal/reporterstats",
- "github.com/uber/jaeger-client-go/internal/spanlog",
- "github.com/uber/jaeger-client-go/internal/throttler",
- "github.com/uber/jaeger-client-go/internal/throttler/remote",
- "github.com/uber/jaeger-client-go/log",
- "github.com/uber/jaeger-client-go/log/zap/mock_opentracing",
- "github.com/uber/jaeger-client-go/rpcmetrics",
- "github.com/uber/jaeger-client-go/testutils",
- "github.com/uber/jaeger-client-go/thrift",
- "github.com/uber/jaeger-client-go/thrift-gen/agent",
- "github.com/uber/jaeger-client-go/thrift-gen/baggage",
- "github.com/uber/jaeger-client-go/thrift-gen/jaeger",
- "github.com/uber/jaeger-client-go/thrift-gen/sampling",
- "github.com/uber/jaeger-client-go/thrift-gen/zipkincore",
- "github.com/uber/jaeger-client-go/transport",
- "github.com/uber/jaeger-client-go/transport/zipkin",
- "github.com/uber/jaeger-client-go/utils",
- "github.com/uber/jaeger-lib/metrics",
- "github.com/uber/jaeger-lib/metrics/metricstest",
- "github.com/uber/jaeger-lib/metrics/prometheus",
- "go.uber.org/atomic",
- "go.uber.org/zap",
- "go.uber.org/zap/zapcore",
- "go.uber.org/zap/zaptest/observer",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
deleted file mode 100644
index 1fed7f814..000000000
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
+++ /dev/null
@@ -1,31 +0,0 @@
-[[constraint]]
- name = "github.com/crossdock/crossdock-go"
- branch = "master"
-
-[[constraint]]
- name = "github.com/opentracing/opentracing-go"
- version = "^1.1"
-
-[[constraint]]
- name = "github.com/prometheus/client_golang"
- version = "^1"
-
-[[constraint]]
- name = "github.com/stretchr/testify"
- version = "^1.1.3"
-
-[[constraint]]
- name = "go.uber.org/atomic"
- version = "^1"
-
-[[constraint]]
- name = "github.com/uber/jaeger-lib"
- version = "^2.2"
-
-[[constraint]]
- name = "go.uber.org/zap"
- version = "^1"
-
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
deleted file mode 100644
index d5e962ccf..000000000
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ /dev/null
@@ -1,134 +0,0 @@
-PROJECT_ROOT=github.com/uber/jaeger-client-go
-PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
-# all .go files that don't exist in hidden directories
-ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
- -e ".*/\..*" \
- -e ".*/_.*" \
- -e ".*/mocks.*")
-
-USE_DEP := true
-
--include crossdock/rules.mk
-
-RACE=-race
-GOTEST=go test -v $(RACE)
-GOLINT=golint
-GOVET=go vet
-GOFMT=gofmt
-FMT_LOG=fmt.log
-LINT_LOG=lint.log
-
-THRIFT_VER=0.9.3
-THRIFT_IMG=thrift:$(THRIFT_VER)
-THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
-THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
-THRIFT_GEN_DIR=thrift-gen
-
-PASS=$(shell printf "\033[32mPASS\033[0m")
-FAIL=$(shell printf "\033[31mFAIL\033[0m")
-COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
-
-.DEFAULT_GOAL := test-and-lint
-
-.PHONY: test-and-lint
-test-and-lint: test fmt lint
-
-.PHONY: test
-test:
-ifeq ($(USE_DEP),true)
- dep check
-endif
- bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
-
-.PHONY: fmt
-fmt:
- $(GOFMT) -e -s -l -w $(ALL_SRC)
- ./scripts/updateLicenses.sh
-
-.PHONY: lint
-lint:
- $(GOVET) $(PACKAGES)
- @cat /dev/null > $(LINT_LOG)
- @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
- @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
- @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
- ./scripts/updateLicenses.sh >> $(FMT_LOG)
- @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
-
-
-.PHONY: install
-install:
- @echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
-ifeq ($(USE_DEP),true)
- dep version || make install-dep
- dep ensure
-endif
-ifeq ($(USE_GLIDE),true)
- glide --version || go get github.com/Masterminds/glide
- glide install
-endif
-
-
-.PHONY: cover
-cover:
- $(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
-
-.PHONY: cover-html
-cover-html: cover
- go tool cover -html=cover.out -o cover.html
-
-# This is not part of the regular test target because we don't want to slow it
-# down.
-.PHONY: test-examples
-test-examples:
- make -C examples
-
-.PHONY: thrift
-thrift: idl-submodule thrift-compile
-
-# TODO at the moment we're not generating tchan_*.go files
-.PHONY: thrift-compile
-thrift-compile: thrift-image
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
- sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
- sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
- sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
- $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
- rm -rf thrift-gen/*/*-remote
- rm -rf crossdock/thrift/*/*-remote
- rm -rf thrift-gen/jaeger/collector.go
-
-.PHONY: idl-submodule
-idl-submodule:
- git submodule init
- git submodule update
-
-.PHONY: thrift-image
-thrift-image:
- $(THRIFT) -version
-
-.PHONY: install-dep
-install-dep:
- - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
- - chmod +x $$GOPATH/bin/dep
-
-.PHONY: install-ci
-install-ci: install
- go get github.com/wadey/gocovmerge
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/lint/golint
-
-.PHONY: test-ci
-test-ci: cover
-ifeq ($(CI_SKIP_LINT),true)
- echo 'skipping lint'
-else
- make lint
-endif
-
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
deleted file mode 100644
index 687f5780c..000000000
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ /dev/null
@@ -1,324 +0,0 @@
-[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
-
-# Jaeger Bindings for Go OpenTracing API
-
-Instrumentation library that implements an
-[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
-
-**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
- * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
- * :x: `import "github.com/jaegertracing/jaeger-client-go"`
-
-## How to Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md).
-
-## Installation
-
-We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
-and [semantic versioning](http://semver.org/) when including this library into an application.
-For example, Jaeger backend imports this library like this:
-
-```toml
-[[constraint]]
- name = "github.com/uber/jaeger-client-go"
- version = "2.17"
-```
-
-If you instead want to use the latest version in `master`, you can pull it via `go get`.
-Note that during `go get` you may see build errors due to incompatible dependencies, which is why
-we recommend using semantic versions for dependencies. Such errors may be fixed by running
-`make install` (it will install `dep` if you don't have it):
-
-```shell
-go get -u github.com/uber/jaeger-client-go/
-cd $GOPATH/src/github.com/uber/jaeger-client-go/
-git submodule update --init --recursive
-make install
-```
-
-## Initialization
-
-See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
-and [config/example_test.go](./config/example_test.go).
-
-### Environment variables
-
-The tracer can be initialized with values coming from environment variables, if it is
-[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
-that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
-None of the env vars are required and all of them can be overridden via direct setting
-of the property on the configuration object.
-
-Property| Description
---- | ---
-JAEGER_SERVICE_NAME | The service name.
-JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
-JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
-JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
-JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
-JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
-JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
-JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
-JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
-JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
-JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
-JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
-JAEGER_SAMPLER_PARAM | The sampler parameter (number).
-JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
-JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
-JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
-JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
-JAEGER_TAGS | A comma-separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
-JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
-JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
-
-By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
-`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
-to the endpoint via `HTTP`, in which case `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` are ignored. If `JAEGER_ENDPOINT` is
-secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
-variables.
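
For illustration, a minimal sketch of environment-driven initialization built on `FromEnv()` and `NewTracer()` (assuming `JAEGER_SERVICE_NAME` or `cfg.ServiceName` is set):

```go
package main

import (
	"log"

	"github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Read JAEGER_* environment variables into a Configuration.
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatalf("cannot parse Jaeger env vars: %v", err)
	}
	// Build the tracer and register it as the global OpenTracing tracer.
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("cannot initialize Jaeger tracer: %v", err)
	}
	defer closer.Close() // flush buffered spans on shutdown
	opentracing.SetGlobalTracer(tracer)
}
```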
-
-### Closing the tracer via `io.Closer`
-
-The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
-It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
-before exiting, e.g.
-
-```go
-tracer, closer, err := cfg.NewTracer(...)
-defer closer.Close()
-```
-
-This is especially useful for command-line tools that enable tracing, as well as
-for long-running apps that support graceful shutdown. For example, if your deployment
-system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
-exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
-
-### Metrics & Monitoring
-
-The tracer emits a number of different metrics, defined in
-[metrics.go](metrics.go). The monitoring backend is expected to support
-tag-based metric names, e.g. instead of `statsd`-style string names
-like `counters.my-service.jaeger.spans.started.sampled`, the metrics
-are defined by a short name and a collection of key/value tags, for
-example: `name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
-file for the full list and descriptions of emitted metrics.
-
-The monitoring backend is represented by the `metrics.Factory` interface from package
-[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
-of that interface can be passed as an option to either the Configuration object or the Tracer
-constructor, for example:
-
-```go
-import (
- "github.com/uber/jaeger-client-go/config"
- "github.com/uber/jaeger-lib/metrics/prometheus"
-)
-
- metricsFactory := prometheus.New()
- tracer, closer, err := config.Configuration{
- ServiceName: "your-service-name",
- }.NewTracer(
- config.Metrics(metricsFactory),
- )
-```
-
-By default, a no-op `metrics.NullFactory` is used.
-
-### Logging
-
-The tracer can be configured with an optional logger, which will be
-used to log communication errors, or log spans if a logging reporter
-option is specified in the configuration. The logging API is abstracted
-by the [Logger](logger.go) interface. A logger instance implementing
-this interface can be set on the `Config` object before calling the
-`New` method.
-
-Besides the [zap](https://github.com/uber-go/zap) implementation
-bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
-one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
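
For example, a minimal sketch of passing the bundled standard-library logger through the `config` package options (the `jaegercfg` and `jaegerlog` import aliases are assumed):

```go
import (
	jaegercfg "github.com/uber/jaeger-client-go/config"
	jaegerlog "github.com/uber/jaeger-client-go/log"
)

cfg := jaegercfg.Configuration{ServiceName: "your-service-name"}
tracer, closer, err := cfg.NewTracer(
	jaegercfg.Logger(jaegerlog.StdLogger), // logs reporter errors (and spans, if LogSpans is enabled)
)
```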
-
-## Instrumentation for Tracing
-
-Since this tracer is fully compliant with OpenTracing API 1.0,
-all code instrumentation should only use the API itself, as described
-in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
-
-## Features
-
-### Reporters
-
-A "reporter" is a component that receives the finished spans and reports
-them to somewhere. Under normal circumstances, the Tracer
-should use the default `RemoteReporter`, which sends the spans out of
-process via configurable "transport". For testing purposes, one can
-use an `InMemoryReporter` that accumulates spans in a buffer and
-allows to retrieve them for later verification. Also available are
-`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
-which logs all finished spans using their `String()` method, and a
-`CompositeReporter` that can be used to combine more than one reporter
-into one, e.g. to attach a logging reporter to the main remote reporter.
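
As a sketch of the testing flow described above (using constructors from this package):

```go
reporter := jaeger.NewInMemoryReporter()
tracer, closer := jaeger.NewTracer(
	"test-service",
	jaeger.NewConstSampler(true), // sample every trace in tests
	reporter,
)
defer closer.Close()

tracer.StartSpan("some-operation").Finish()
spans := reporter.GetSpans() // finished spans, ready for assertions
_ = spans
```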
-
-### Span Reporting Transports
-
-The remote reporter uses "transports" to actually send the spans out
-of process. Currently the supported transports include:
- * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
- * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
-
-### Sampling
-
-The tracer does not record all spans, but only those that have the
-sampling bit set in the `flags`. When a new trace is started and a new
-unique ID is generated, a sampling decision is made whether this trace
-should be sampled. The sampling decision is propagated to all downstream
-calls via the `flags` field of the trace context. The following samplers
-are available:
- 1. `RemotelyControlledSampler` uses one of the other simpler samplers
- and periodically updates it by polling an external server. This
- allows dynamic control of the sampling strategies.
- 1. `ConstSampler` always makes the same sampling decision for all
- trace IDs. It can be configured to either sample all traces, or
- to sample none.
- 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
- for a given trace to be sampled. The actual decision is made by
- comparing the trace ID with a random number multiplied by the
- sampling rate.
- 1. `RateLimitingSampler` can be used to allow only a certain fixed
- number of traces to be sampled per second.
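
As a concrete sketch using the `SamplerConfig` fields appearing later in this diff, a constant sampler that records every trace could be configured like so:

```go
import (
	"github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

cfg := jaegercfg.Configuration{
	ServiceName: "your-service-name",
	Sampler: &jaegercfg.SamplerConfig{
		Type:  jaeger.SamplerTypeConst, // "const"
		Param: 1,                       // 1 = sample all traces, 0 = sample none
	},
}
tracer, closer, err := cfg.NewTracer()
```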
-
-#### Delayed sampling
-
-Version 2.20 introduced the ability to delay sampling decisions in the life cycle
-of the root span. It involves several features and architectural changes:
- * **Shared sampling state**: the sampling state is shared across all local
- (i.e. in-process) spans for a given trace.
- * **New `SamplerV2` API** allows the sampler to be called at multiple points
- in the life cycle of a span:
- * on span creation
- * on overwriting span operation name
- * on setting span tags
- * on finishing the span
- * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
- to indicate if the negative sampling decision is final or not (positive sampling
- decisions are always final). If the decision is not final, the sampler will be
- called again on further span life cycle events, like setting tags.
-
-These new features are used in the experimental `x.TagMatchingSampler`, which
-can sample a trace based on a certain tag added to the root
-span or one of its local (in-process) children. The sampler can be used with
-another experimental `x.PrioritySampler` that allows multiple samplers to try
-to make a sampling decision, in a certain priority order.
-
-### Baggage Injection
-
-The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
-to the span context and propagated throughout the trace. An external process can inject baggage
-by setting the special HTTP Header `jaeger-baggage` on a request:
-
-```sh
-curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
-```
-
-Baggage can also be set programmatically inside your service:
-
-```go
-if span := opentracing.SpanFromContext(ctx); span != nil {
- span.SetBaggageItem("key", "value")
-}
-```
-
-Another service downstream of that can retrieve the baggage in a similar way:
-
-```go
-if span := opentracing.SpanFromContext(ctx); span != nil {
- val := span.BaggageItem("key")
- println(val)
-}
-```
-
-### Debug Traces (Forced Sampling)
-
-#### Programmatically
-
-The OpenTracing API defines a `sampling.priority` standard tag that
-can be used to affect the sampling of a span and its children:
-
-```go
-import (
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
-)
-
-span := opentracing.SpanFromContext(ctx)
-ext.SamplingPriority.Set(span, 1)
-```
-
-#### Via HTTP Headers
-
-Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
-which can be set in the incoming request, e.g.
-
-```sh
-curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
-```
-
-When Jaeger sees this header in the request that otherwise has no
-tracing context, it ensures that the new trace started for this
-request will be sampled in the "debug" mode (meaning it should survive
-all downsampling that might happen in the collection pipeline), and the
-root span will have a tag as if this statement was executed:
-
-```go
-span.SetTag("jaeger-debug-id", "some-correlation-id")
-```
-
-This allows using Jaeger UI to find the trace by this tag.
-
-### Zipkin HTTP B3 compatible header propagation
-
-Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
-by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
-
-However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
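
A hedged sketch of switching to B3 propagation with tracer-level options (the `zipkin` subpackage provides the propagator; `TracerOptions.Injector`/`Extractor` appear in `config.go` later in this diff):

```go
import (
	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/zipkin"
)

propagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
tracer, closer := jaeger.NewTracer(
	"your-service-name",
	jaeger.NewConstSampler(true),
	jaeger.NewNullReporter(),
	jaeger.TracerOptions.Injector(opentracing.HTTPHeaders, propagator),
	jaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, propagator),
)
defer closer.Close()
```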
-
-## SelfRef
-
-Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
-to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
-but not yet accepted. This allows the caller to provide an already created `SpanContext`
-when starting a new span. The `Self` reference bypasses trace and span id generation,
-as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
-set appropriately by the caller).
-
-The `Self` reference supports the following use cases:
- * the ability to provide externally generated trace and span IDs
- * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
-
-Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type:
-```
-span := tracer.StartSpan(
- "continued_span",
- jaeger.SelfRef(yourSpanContext),
-)
-...
-defer span.Finish()
-```
-
-## License
-
-[Apache 2.0 License](LICENSE).
-
-
-[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
-[doc]: https://godoc.org/github.com/uber/jaeger-client-go
-[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
-[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
-[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
-[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
-[ot-url]: http://opentracing.io
-[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
-[timeunits]: https://golang.org/pkg/time/#ParseDuration
-[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
deleted file mode 100644
index 12438d841..000000000
--- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Release Process
-
-1. Create a PR "Preparing for release X.Y.Z" against master branch
- * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
- * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
- * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
-2. Create a release "Release X.Y.Z" on Github
- * Create Tag `vX.Y.Z`
- * Copy CHANGELOG.md into the release notes
-3. Create a PR "Back to development" against master branch
- * Add `<next_version> (unreleased)` to CHANGELOG.md
- * Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
deleted file mode 100644
index 1037ca0e8..000000000
--- a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go/log"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
-)
-
-// baggageSetter is an actor that can set a baggage value on a Span given certain
-// restrictions (eg. maxValueLength).
-type baggageSetter struct {
- restrictionManager baggage.RestrictionManager
- metrics *Metrics
-}
-
-func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
- return &baggageSetter{
- restrictionManager: restrictionManager,
- metrics: metrics,
- }
-}
-
-// (NB) span should hold the lock before making this call
-func (s *baggageSetter) setBaggage(span *Span, key, value string) {
- var truncated bool
- var prevItem string
- restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
- if !restriction.KeyAllowed() {
- s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
- s.metrics.BaggageUpdateFailure.Inc(1)
- return
- }
- if len(value) > restriction.MaxValueLength() {
- truncated = true
- value = value[:restriction.MaxValueLength()]
- s.metrics.BaggageTruncate.Inc(1)
- }
- prevItem = span.context.baggage[key]
- s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
- span.context = span.context.WithBaggageItem(key, value)
- s.metrics.BaggageUpdateSuccess.Inc(1)
-}
-
-func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
- if !span.context.IsSampled() {
- return
- }
- fields := []log.Field{
- log.String("event", "baggage"),
- log.String("key", key),
- log.String("value", value),
- }
- if prevItem != "" {
- fields = append(fields, log.String("override", "true"))
- }
- if truncated {
- fields = append(fields, log.String("truncated", "true"))
- }
- if !valid {
- fields = append(fields, log.String("invalid", "true"))
- }
- span.logFieldsNoLocking(fields...)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
deleted file mode 100644
index bb1228294..000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ /dev/null
@@ -1,434 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- "errors"
- "fmt"
- "io"
- "strings"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/uber/jaeger-client-go/utils"
-
- "github.com/uber/jaeger-client-go"
- "github.com/uber/jaeger-client-go/internal/baggage/remote"
- throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
- "github.com/uber/jaeger-client-go/rpcmetrics"
- "github.com/uber/jaeger-client-go/transport"
- "github.com/uber/jaeger-lib/metrics"
-)
-
-const defaultSamplingProbability = 0.001
-
-// Configuration configures and creates Jaeger Tracer
-type Configuration struct {
- // ServiceName specifies the service name to use on the tracer.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
- ServiceName string `yaml:"serviceName"`
-
- // Disabled can be provided by FromEnv() via the environment variable named JAEGER_DISABLED
- Disabled bool `yaml:"disabled"`
-
- // RPCMetrics can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
- RPCMetrics bool `yaml:"rpc_metrics"`
-
- // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
- Tags []opentracing.Tag `yaml:"tags"`
-
- Sampler *SamplerConfig `yaml:"sampler"`
- Reporter *ReporterConfig `yaml:"reporter"`
- Headers *jaeger.HeadersConfig `yaml:"headers"`
- BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
- Throttler *ThrottlerConfig `yaml:"throttler"`
-}
-
-// SamplerConfig allows initializing a non-default sampler. All fields are optional.
-type SamplerConfig struct {
- // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
- Type string `yaml:"type"`
-
- // Param is a value passed to the sampler.
- // Valid values for Param field are:
- // - for "const" sampler, 0 or 1 for always false/true respectively
- // - for "probabilistic" sampler, a probability between 0 and 1
- // - for "rateLimiting" sampler, the number of spans per second
- // - for "remote" sampler, param is the same as for "probabilistic"
- // and indicates the initial sampling rate before the actual one
- // is received from the mothership.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
- Param float64 `yaml:"param"`
-
- // SamplingServerURL is the URL of sampling manager that can provide
- // sampling strategy to this service.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
- SamplingServerURL string `yaml:"samplingServerURL"`
-
- // SamplingRefreshInterval controls how often the remotely controlled sampler will poll
- // sampling manager for the appropriate sampling strategy.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
- SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
-
- // MaxOperations is the maximum number of operations that the PerOperationSampler
- // will keep track of. If an operation is not tracked, a default probabilistic
- // sampler will be used rather than the per operation specific sampler.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
- MaxOperations int `yaml:"maxOperations"`
-
- // Opt-in feature for applications that require late binding of span name via explicit
- // call to SetOperationName when using PerOperationSampler. When this feature is enabled,
- // the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
- // decision as non-final (and the span as writeable). This may lead to degraded performance
- // in applications that always provide the correct span name on trace creation.
- //
- // For backwards compatibility this option is off by default.
- OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
-
- // Options can be used to programmatically pass additional options to the Remote sampler.
- Options []jaeger.SamplerOption
-}
-
-// ReporterConfig configures the reporter. All fields are optional.
-type ReporterConfig struct {
- // QueueSize controls how many spans the reporter can keep in memory before it starts dropping
- // new spans. The queue is continuously drained by a background go-routine, as fast as spans
- // can be sent out of process.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
- QueueSize int `yaml:"queueSize"`
-
- // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
- // It is generally not useful, as it only matters for very low traffic services.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
- BufferFlushInterval time.Duration
-
- // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
- // and logs all submitted spans. Main Configuration.Logger must be initialized in the code
- // for this option to have any effect.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
- LogSpans bool `yaml:"logSpans"`
-
- // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
- // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
- LocalAgentHostPort string `yaml:"localAgentHostPort"`
-
- // DisableAttemptReconnecting when true, disables the UDP connection helper that periodically re-resolves
- // the agent's hostname and reconnects if there was a change. This option only
- // applies if LocalAgentHostPort is specified.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
- DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
-
- // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
- // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
- AttemptReconnectInterval time.Duration
-
- // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
- // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
- CollectorEndpoint string `yaml:"collectorEndpoint"`
-
- // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
- // Can be provided by FromEnv() via the environment variable named JAEGER_USER
- User string `yaml:"user"`
-
- // Password instructs reporter to include a password for basic http authentication when sending spans to
- // jaeger-collector.
- // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
- Password string `yaml:"password"`
-
- // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
- // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
- HTTPHeaders map[string]string `yaml:"http_headers"`
-}
-
-// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
-// certain baggage keys. All fields are optional.
-type BaggageRestrictionsConfig struct {
- // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
- // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
- // been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
- // restrictions have been retrieved from jaeger-agent.
- DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
-
- // HostPort is the hostPort of jaeger-agent's baggage restrictions server
- HostPort string `yaml:"hostPort"`
-
- // RefreshInterval controls how often the baggage restriction manager will poll
- // jaeger-agent for the most recent baggage restrictions.
- RefreshInterval time.Duration `yaml:"refreshInterval"`
-}
-
-// ThrottlerConfig configures the throttler which can be used to throttle the
-// rate at which the client may send debug requests.
-type ThrottlerConfig struct {
- // HostPort of jaeger-agent's credit server.
- HostPort string `yaml:"hostPort"`
-
- // RefreshInterval controls how often the throttler will poll jaeger-agent
- // for more throttling credits.
- RefreshInterval time.Duration `yaml:"refreshInterval"`
-
- // SynchronousInitialization determines whether or not the throttler should
- // synchronously fetch credits from the agent when an operation is seen for
- // the first time. This should be set to true if the client will be used by
- // a short lived service that needs to ensure that credits are fetched
- // a short-lived service that needs to ensure that credits are fetched
- SynchronousInitialization bool `yaml:"synchronousInitialization"`
-}
-
-type nullCloser struct{}
-
-func (*nullCloser) Close() error { return nil }
-
-// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
-// before shutdown.
-//
-// Deprecated: use NewTracer() function
-func (c Configuration) New(
- serviceName string,
- options ...Option,
-) (opentracing.Tracer, io.Closer, error) {
- if serviceName != "" {
- c.ServiceName = serviceName
- }
-
- return c.NewTracer(options...)
-}
-
-// NewTracer returns a new tracer based on the current configuration, using the given options,
-// and a closer func that can be used to flush buffers before shutdown.
-func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
- if c.Disabled {
- return &opentracing.NoopTracer{}, &nullCloser{}, nil
- }
-
- if c.ServiceName == "" {
- return nil, nil, errors.New("no service name provided")
- }
-
- opts := applyOptions(options...)
- tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
- if c.RPCMetrics {
- Observer(
- rpcmetrics.NewObserver(
- opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
- rpcmetrics.DefaultNameNormalizer,
- ),
- )(&opts) // adds to c.observers
- }
- if c.Sampler == nil {
- c.Sampler = &SamplerConfig{
- Type: jaeger.SamplerTypeRemote,
- Param: defaultSamplingProbability,
- }
- }
- if c.Reporter == nil {
- c.Reporter = &ReporterConfig{}
- }
-
- sampler := opts.sampler
- if sampler == nil {
- s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
- if err != nil {
- return nil, nil, err
- }
- sampler = s
- }
-
- reporter := opts.reporter
- if reporter == nil {
- r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
- if err != nil {
- return nil, nil, err
- }
- reporter = r
- }
-
- tracerOptions := []jaeger.TracerOption{
- jaeger.TracerOptions.Metrics(tracerMetrics),
- jaeger.TracerOptions.Logger(opts.logger),
- jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
- jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
- jaeger.TracerOptions.PoolSpans(opts.poolSpans),
- jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
- jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
- jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
- }
-
- for _, tag := range opts.tags {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
- }
-
- for _, tag := range c.Tags {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
- }
-
- for _, obs := range opts.observers {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
- }
-
- for _, cobs := range opts.contribObservers {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
- }
-
- for format, injector := range opts.injectors {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
- }
-
- for format, extractor := range opts.extractors {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
- }
-
- if c.BaggageRestrictions != nil {
- mgr := remote.NewRestrictionManager(
- c.ServiceName,
- remote.Options.Metrics(tracerMetrics),
- remote.Options.Logger(opts.logger),
- remote.Options.HostPort(c.BaggageRestrictions.HostPort),
- remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
- remote.Options.DenyBaggageOnInitializationFailure(
- c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
- ),
- )
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
- }
-
- if c.Throttler != nil {
- debugThrottler := throttler.NewThrottler(
- c.ServiceName,
- throttler.Options.Metrics(tracerMetrics),
- throttler.Options.Logger(opts.logger),
- throttler.Options.HostPort(c.Throttler.HostPort),
- throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
- throttler.Options.SynchronousInitialization(
- c.Throttler.SynchronousInitialization,
- ),
- )
-
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
- }
-
- tracer, closer := jaeger.NewTracer(
- c.ServiceName,
- sampler,
- reporter,
- tracerOptions...,
- )
-
- return tracer, closer, nil
-}
-
-// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
-// It returns a closer func that can be used to flush buffers before shutdown.
-func (c Configuration) InitGlobalTracer(
- serviceName string,
- options ...Option,
-) (io.Closer, error) {
- if c.Disabled {
- return &nullCloser{}, nil
- }
- tracer, closer, err := c.New(serviceName, options...)
- if err != nil {
- return nil, err
- }
- opentracing.SetGlobalTracer(tracer)
- return closer, nil
-}
-
-// NewSampler creates a new sampler based on the configuration
-func (sc *SamplerConfig) NewSampler(
- serviceName string,
- metrics *jaeger.Metrics,
-) (jaeger.Sampler, error) {
- samplerType := strings.ToLower(sc.Type)
- if samplerType == jaeger.SamplerTypeConst {
- return jaeger.NewConstSampler(sc.Param != 0), nil
- }
- if samplerType == jaeger.SamplerTypeProbabilistic {
- if sc.Param >= 0 && sc.Param <= 1.0 {
- return jaeger.NewProbabilisticSampler(sc.Param)
- }
- return nil, fmt.Errorf(
- "invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
- sc.Param,
- )
- }
- if samplerType == jaeger.SamplerTypeRateLimiting {
- return jaeger.NewRateLimitingSampler(sc.Param), nil
- }
- if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
- sc2 := *sc
- sc2.Type = jaeger.SamplerTypeProbabilistic
- initSampler, err := sc2.NewSampler(serviceName, nil)
- if err != nil {
- return nil, err
- }
- options := []jaeger.SamplerOption{
- jaeger.SamplerOptions.Metrics(metrics),
- jaeger.SamplerOptions.InitialSampler(initSampler),
- jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
- jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
- jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
- jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
- }
- options = append(options, sc.Options...)
- return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
- }
- return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
-}
-
-// NewReporter instantiates a new reporter that submits spans to the collector
-func (rc *ReporterConfig) NewReporter(
- serviceName string,
- metrics *jaeger.Metrics,
- logger jaeger.Logger,
-) (jaeger.Reporter, error) {
- sender, err := rc.newTransport(logger)
- if err != nil {
- return nil, err
- }
- reporter := jaeger.NewRemoteReporter(
- sender,
- jaeger.ReporterOptions.QueueSize(rc.QueueSize),
- jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
- jaeger.ReporterOptions.Logger(logger),
- jaeger.ReporterOptions.Metrics(metrics))
- if rc.LogSpans && logger != nil {
- logger.Infof("Initializing logging reporter\n")
- reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
- }
- return reporter, err
-}
-
-func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
- switch {
- case rc.CollectorEndpoint != "":
- httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)}
- if rc.User != "" && rc.Password != "" {
- httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
- }
- return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
- default:
- return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
- AgentClientUDPParams: utils.AgentClientUDPParams{
- HostPort: rc.LocalAgentHostPort,
- Logger: logger,
- DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
- AttemptReconnectInterval: rc.AttemptReconnectInterval,
- },
- })
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
deleted file mode 100644
index 92d60cd59..000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- "fmt"
- "net/url"
- "os"
- "strconv"
- "strings"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/pkg/errors"
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- // environment variable names
- envServiceName = "JAEGER_SERVICE_NAME"
- envDisabled = "JAEGER_DISABLED"
- envRPCMetrics = "JAEGER_RPC_METRICS"
- envTags = "JAEGER_TAGS"
- envSamplerType = "JAEGER_SAMPLER_TYPE"
- envSamplerParam = "JAEGER_SAMPLER_PARAM"
- envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
- envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
- envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
- envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
- envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
- envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
- envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
- envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
- envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
- envEndpoint = "JAEGER_ENDPOINT"
- envUser = "JAEGER_USER"
- envPassword = "JAEGER_PASSWORD"
- envAgentHost = "JAEGER_AGENT_HOST"
- envAgentPort = "JAEGER_AGENT_PORT"
-)
-
-// FromEnv uses environment variables to set the tracer's Configuration
-func FromEnv() (*Configuration, error) {
- c := &Configuration{}
- return c.FromEnv()
-}
-
-// FromEnv uses environment variables and overrides existing tracer's Configuration
-func (c *Configuration) FromEnv() (*Configuration, error) {
- if e := os.Getenv(envServiceName); e != "" {
- c.ServiceName = e
- }
-
- if e := os.Getenv(envRPCMetrics); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- c.RPCMetrics = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
- }
- }
-
- if e := os.Getenv(envDisabled); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- c.Disabled = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
- }
- }
-
- if e := os.Getenv(envTags); e != "" {
- c.Tags = parseTags(e)
- }
-
- if c.Sampler == nil {
- c.Sampler = &SamplerConfig{}
- }
-
- if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
- c.Sampler = s
- } else {
- return nil, errors.Wrap(err, "cannot obtain sampler config from env")
- }
-
- if c.Reporter == nil {
- c.Reporter = &ReporterConfig{}
- }
-
- if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
- c.Reporter = r
- } else {
- return nil, errors.Wrap(err, "cannot obtain reporter config from env")
- }
-
- return c, nil
-}
-
-// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
-func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
- if e := os.Getenv(envSamplerType); e != "" {
- sc.Type = e
- }
-
- if e := os.Getenv(envSamplerParam); e != "" {
- if value, err := strconv.ParseFloat(e, 64); err == nil {
- sc.Param = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
- }
- }
-
- if e := os.Getenv(envSamplingEndpoint); e != "" {
- sc.SamplingServerURL = e
- } else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
- sc.SamplingServerURL = e
- } else if e := os.Getenv(envAgentHost); e != "" {
- // Fallback if we know the agent host - try the sampling endpoint there
- sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
- }
-
- if e := os.Getenv(envSamplerMaxOperations); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- sc.MaxOperations = int(value)
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
- }
- }
-
- if e := os.Getenv(envSamplerRefreshInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- sc.SamplingRefreshInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
- }
- }
-
- return sc, nil
-}
-
-// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
-func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
- if e := os.Getenv(envReporterMaxQueueSize); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- rc.QueueSize = int(value)
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
- }
- }
-
- if e := os.Getenv(envReporterFlushInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- rc.BufferFlushInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
- }
- }
-
- if e := os.Getenv(envReporterLogSpans); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- rc.LogSpans = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
- }
- }
-
- if e := os.Getenv(envEndpoint); e != "" {
- u, err := url.ParseRequestURI(e)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
- }
- rc.CollectorEndpoint = u.String()
- user := os.Getenv(envUser)
- pswd := os.Getenv(envPassword)
- if user != "" && pswd == "" || user == "" && pswd != "" {
- return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
- }
- rc.User = user
- rc.Password = pswd
- } else {
- useEnv := false
- host := jaeger.DefaultUDPSpanServerHost
- if e := os.Getenv(envAgentHost); e != "" {
- host = e
- useEnv = true
- }
-
- port := jaeger.DefaultUDPSpanServerPort
- if e := os.Getenv(envAgentPort); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- port = int(value)
- useEnv = true
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
- }
- }
- if useEnv || rc.LocalAgentHostPort == "" {
- rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
- }
-
- if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- rc.DisableAttemptReconnecting = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
- }
- }
-
- if !rc.DisableAttemptReconnecting {
- if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- rc.AttemptReconnectInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
- }
- }
- }
- }
-
- return rc, nil
-}
-
-// parseTags parses the given string into a collection of Tags.
-// Spec for this value:
-// - comma separated list of key=value
-// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
-// is an environment variable and `defaultValue` is the value to use in case the env var is not set
-func parseTags(sTags string) []opentracing.Tag {
- pairs := strings.Split(sTags, ",")
- tags := make([]opentracing.Tag, 0)
- for _, p := range pairs {
- kv := strings.SplitN(p, "=", 2)
- k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
-
- if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
- ed := strings.SplitN(v[2:len(v)-1], ":", 2)
- e, d := ed[0], ed[1]
- v = os.Getenv(e)
- if v == "" && d != "" {
- v = d
- }
- }
-
- tag := opentracing.Tag{Key: k, Value: v}
- tags = append(tags, tag)
- }
-
- return tags
-}
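
A minimal sketch (not part of this diff) of the environment-driven path above; the variable values, including the ${ENV_VAR:default} tag notation handled by parseTags, are assumptions.

package main

import (
	"fmt"
	"log"
	"os"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Hypothetical values; any JAEGER_* variable left unset simply keeps the
	// corresponding Configuration field at its default.
	os.Setenv("JAEGER_SERVICE_NAME", "podman")
	os.Setenv("JAEGER_AGENT_HOST", "127.0.0.1")
	os.Setenv("JAEGER_AGENT_PORT", "6831")
	// Tag values may use the ${ENV_VAR:default} notation handled by parseTags.
	os.Setenv("JAEGER_TAGS", "deployment=dev,region=${REGION:us-east-1}")

	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.ServiceName, cfg.Reporter.LocalAgentHostPort, cfg.Tags)
}

With JAEGER_ENDPOINT unset, reporterConfigFromEnv falls back to the agent host and port, so LocalAgentHostPort ends up as 127.0.0.1:6831 here.
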
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
deleted file mode 100644
index e0e50e834..000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/options.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- opentracing "github.com/opentracing/opentracing-go"
- "github.com/uber/jaeger-lib/metrics"
-
- "github.com/uber/jaeger-client-go"
-)
-
-// Option is a function that sets some option on the client.
-type Option func(c *Options)
-
-// Options control behavior of the client.
-type Options struct {
- metrics metrics.Factory
- logger jaeger.Logger
- reporter jaeger.Reporter
- sampler jaeger.Sampler
- contribObservers []jaeger.ContribObserver
- observers []jaeger.Observer
- gen128Bit bool
- poolSpans bool
- zipkinSharedRPCSpan bool
- maxTagValueLength int
- noDebugFlagOnForcedSampling bool
- tags []opentracing.Tag
- injectors map[interface{}]jaeger.Injector
- extractors map[interface{}]jaeger.Extractor
-}
-
-// Metrics creates an Option that initializes Metrics in the tracer,
-// which is used to emit statistics about spans.
-func Metrics(factory metrics.Factory) Option {
- return func(c *Options) {
- c.metrics = factory
- }
-}
-
-// Logger can be provided to log Reporter errors, as well as to log spans
-// if Reporter.LogSpans is set to true.
-func Logger(logger jaeger.Logger) Option {
- return func(c *Options) {
- c.logger = logger
- }
-}
-
-// Reporter can be provided explicitly to override the configuration.
-// Useful for testing, e.g. by passing InMemoryReporter.
-func Reporter(reporter jaeger.Reporter) Option {
- return func(c *Options) {
- c.reporter = reporter
- }
-}
-
-// Sampler can be provided explicitly to override the configuration.
-func Sampler(sampler jaeger.Sampler) Option {
- return func(c *Options) {
- c.sampler = sampler
- }
-}
-
-// Observer can be registered with the Tracer to receive notifications about new Spans.
-func Observer(observer jaeger.Observer) Option {
- return func(c *Options) {
- c.observers = append(c.observers, observer)
- }
-}
-
-// ContribObserver can be registered with the Tracer to receive notifications
-// about new spans.
-func ContribObserver(observer jaeger.ContribObserver) Option {
- return func(c *Options) {
- c.contribObservers = append(c.contribObservers, observer)
- }
-}
-
-// Gen128Bit specifies whether to generate 128bit trace IDs.
-func Gen128Bit(gen128Bit bool) Option {
- return func(c *Options) {
- c.gen128Bit = gen128Bit
- }
-}
-
-// PoolSpans specifies whether to pool spans.
-func PoolSpans(poolSpans bool) Option {
- return func(c *Options) {
- c.poolSpans = poolSpans
- }
-}
-
-// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
-// and server spans a la zipkin. If false, client and server spans will be assigned
-// different IDs.
-func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
- return func(c *Options) {
- c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
- }
-}
-
-// MaxTagValueLength can be provided to override the default max tag value length.
-func MaxTagValueLength(maxTagValueLength int) Option {
- return func(c *Options) {
- c.maxTagValueLength = maxTagValueLength
- }
-}
-
-// NoDebugFlagOnForcedSampling controls whether the debug flag is set when calling
-// span.setSamplingPriority to force-sample a span.
-func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
- return func(c *Options) {
- c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
- }
-}
-
-// Tag creates an option that adds a tracer-level tag.
-func Tag(key string, value interface{}) Option {
- return func(c *Options) {
- c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
- }
-}
-
-// Injector registers an Injector with the given format.
-func Injector(format interface{}, injector jaeger.Injector) Option {
- return func(c *Options) {
- c.injectors[format] = injector
- }
-}
-
-// Extractor registers an Extractor with the given format.
-func Extractor(format interface{}, extractor jaeger.Extractor) Option {
- return func(c *Options) {
- c.extractors[format] = extractor
- }
-}
-
-func applyOptions(options ...Option) Options {
- opts := Options{
- injectors: make(map[interface{}]jaeger.Injector),
- extractors: make(map[interface{}]jaeger.Extractor),
- }
- for _, option := range options {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = metrics.NullFactory
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- return opts
-}
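
To illustrate the functional options defined above, a sketch (not part of this diff) of overriding the reporter and sampler, e.g. in a unit test; the service and operation names are assumptions.

package main

import (
	"log"

	"github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := &jaegercfg.Configuration{ServiceName: "podman-test"}

	// Explicit Reporter/Sampler options override whatever the Configuration
	// would otherwise build, which keeps tests off the network.
	reporter := jaeger.NewInMemoryReporter()
	tracer, closer, err := cfg.NewTracer(
		jaegercfg.Reporter(reporter),
		jaegercfg.Sampler(jaeger.NewConstSampler(true)),
		jaegercfg.Gen128Bit(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()

	tracer.StartSpan("unit-test").Finish()
	log.Printf("spans collected in memory: %d", reporter.SpansSubmitted())
}
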
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
deleted file mode 100644
index 2f63d5909..000000000
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
-
- "github.com/opentracing/opentracing-go"
-)
-
-const (
- // JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.25.0"
-
- // JaegerClientVersionTagKey is the name of the tag used to report client version.
- JaegerClientVersionTagKey = "jaeger.version"
-
- // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
- // if found in the carrier, forces the trace to be sampled as "debug" trace.
- // The value of the header is recorded as the tag on the root span, so that the
- // trace can be found in the UI using this value as a correlation ID.
- JaegerDebugHeader = "jaeger-debug-id"
-
- // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
- // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
- // a root span does not exist.
- JaegerBaggageHeader = "jaeger-baggage"
-
- // TracerHostnameTagKey used to report host name of the process.
- TracerHostnameTagKey = "hostname"
-
- // TracerIPTagKey used to report ip of the process.
- TracerIPTagKey = "ip"
-
- // TracerUUIDTagKey used to report UUID of the client process.
- TracerUUIDTagKey = "client-uuid"
-
- // SamplerTypeTagKey reports which sampler was used on the root span.
- SamplerTypeTagKey = "sampler.type"
-
- // SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
- SamplerParamTagKey = "sampler.param"
-
- // TraceContextHeaderName is the http header name used to propagate tracing context.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceContextHeaderName = "uber-trace-id"
-
- // TracerStateHeaderName is deprecated.
- // Deprecated: use TraceContextHeaderName
- TracerStateHeaderName = TraceContextHeaderName
-
- // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceBaggageHeaderPrefix = "uberctx-"
-
- // SamplerTypeConst is the type of sampler that always makes the same decision.
- SamplerTypeConst = "const"
-
- // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
- SamplerTypeRemote = "remote"
-
- // SamplerTypeProbabilistic is the type of sampler that samples traces
- // with a certain fixed probability.
- SamplerTypeProbabilistic = "probabilistic"
-
- // SamplerTypeRateLimiting is the type of sampler that samples
- // only up to a fixed number of traces per second.
- SamplerTypeRateLimiting = "ratelimiting"
-
- // SamplerTypeLowerBound is the type of sampler that samples
- // at least a fixed number of traces per second.
- SamplerTypeLowerBound = "lowerbound"
-
- // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
- DefaultUDPSpanServerHost = "localhost"
-
- // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
- DefaultUDPSpanServerPort = 6831
-
- // DefaultSamplingServerPort is the default port to fetch sampling config from, via http
- DefaultSamplingServerPort = 5778
-
- // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
- DefaultMaxTagValueLength = 256
-
- // SelfRefType is a jaeger specific reference type that supports creating a span
- // with an already defined context.
- selfRefType opentracing.SpanReferenceType = 99
-)
-
-var (
- // DefaultSamplingServerURL is the default url to fetch sampling config from, via http
- DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
deleted file mode 100644
index 4ce1881f3..000000000
--- a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- opentracing "github.com/opentracing/opentracing-go"
-)
-
-// ContribObserver can be registered with the Tracer to receive notifications
-// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
-type ContribObserver interface {
- // Create and return a span observer. Called when a span starts.
- // If the Observer is not interested in the given span, it must return (nil, false).
- // E.g :
- // func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
- // var sp opentracing.Span
- // sso := opentracing.StartSpanOptions{}
- // if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok {
- // // we have a valid SpanObserver
- // }
- // ...
- // }
- OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
-}
-
-// ContribSpanObserver is created by the Observer and receives notifications
-// about other Span events. This interface is meant to match
-// github.com/opentracing-contrib/go-observer, via duck typing, without
-// directly importing the go-observer package.
-type ContribSpanObserver interface {
- OnSetOperationName(operationName string)
- OnSetTag(key string, value interface{})
- OnFinish(options opentracing.FinishOptions)
-}
-
-// wrapper observer for the old observers (see observer.go)
-type oldObserver struct {
- obs Observer
-}
-
-func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
- spanObserver := o.obs.OnStartSpan(operationName, options)
- return spanObserver, spanObserver != nil
-}
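
A sketch (not part of this diff) of what implementing the two interfaces above might look like; the type and field names are hypothetical. Such an observer would be registered through the ContribObserver config option shown earlier in this diff.

package tracing

import (
	"fmt"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go"
)

// durationObserver satisfies jaeger.ContribObserver.
type durationObserver struct{}

func (durationObserver) OnStartSpan(sp opentracing.Span, operationName string,
	options opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) {
	return &spanWatcher{operation: operationName}, true
}

// spanWatcher satisfies jaeger.ContribSpanObserver.
type spanWatcher struct{ operation string }

func (w *spanWatcher) OnSetOperationName(operationName string) { w.operation = operationName }
func (w *spanWatcher) OnSetTag(key string, value interface{})  {}
func (w *spanWatcher) OnFinish(options opentracing.FinishOptions) {
	fmt.Printf("span %q finished at %v\n", w.operation, options.FinishTime)
}
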
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
deleted file mode 100644
index 4f5549033..000000000
--- a/vendor/github.com/uber/jaeger-client-go/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
-It is currently using a Zipkin-compatible data model and can be directly
-integrated with a Zipkin backend (http://zipkin.io).
-
-For integration instructions please refer to the README:
-
-https://github.com/uber/jaeger-client-go/blob/master/README.md
-*/
-package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
deleted file mode 100644
index f4c05b2db..000000000
--- a/vendor/github.com/uber/jaeger-client-go/glide.lock
+++ /dev/null
@@ -1,98 +0,0 @@
-hash: a4a449cfc060c2d7be850a69b171e4382a3bd00d1a0a72cfc944facc3fe263bf
-updated: 2019-09-23T17:10:15.213856-04:00
-imports:
-- name: github.com/beorn7/perks
- version: 37c8de3658fcb183f997c4e13e8337516ab753e6
- subpackages:
- - quantile
-- name: github.com/codahale/hdrhistogram
- version: 3a0bb77429bd3a61596f5e8a3172445844342120
-- name: github.com/crossdock/crossdock-go
- version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
- subpackages:
- - assert
- - require
-- name: github.com/davecgh/go-spew
- version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d
- subpackages:
- - spew
-- name: github.com/golang/protobuf
- version: 1680a479a2cfb3fa22b972af7e36d0a0fde47bf8
- subpackages:
- - proto
-- name: github.com/matttproud/golang_protobuf_extensions
- version: c182affec369e30f25d3eb8cd8a478dee585ae7d
- subpackages:
- - pbutil
-- name: github.com/opentracing/opentracing-go
- version: 659c90643e714681897ec2521c60567dd21da733
- subpackages:
- - ext
- - harness
- - log
-- name: github.com/pkg/errors
- version: ba968bfe8b2f7e042a574c888954fccecfa385b4
-- name: github.com/pmezard/go-difflib
- version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
- subpackages:
- - difflib
-- name: github.com/prometheus/client_golang
- version: 170205fb58decfd011f1550d4cfb737230d7ae4f
- subpackages:
- - prometheus
- - prometheus/internal
-- name: github.com/prometheus/client_model
- version: 14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016
- subpackages:
- - go
-- name: github.com/prometheus/common
- version: 287d3e634a1e550c9e463dd7e5a75a422c614505
- subpackages:
- - expfmt
- - internal/bitbucket.org/ww/goautoneg
- - model
-- name: github.com/prometheus/procfs
- version: de25ac347ef9305868b04dc42425c973b863b18c
- subpackages:
- - internal/fs
- - internal/util
-- name: github.com/stretchr/testify
- version: 85f2b59c4459e5bf57488796be8c3667cb8246d6
- subpackages:
- - assert
- - require
- - suite
-- name: github.com/uber-go/atomic
- version: df976f2515e274675050de7b3f42545de80594fd
-- name: github.com/uber/jaeger-lib
- version: a87ae9d84fb038a8d79266298970720be7c80fcd
- subpackages:
- - metrics
- - metrics/metricstest
- - metrics/prometheus
-- name: go.uber.org/atomic
- version: df976f2515e274675050de7b3f42545de80594fd
-- name: go.uber.org/multierr
- version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
-- name: go.uber.org/zap
- version: 27376062155ad36be76b0f12cf1572a221d3a48c
- subpackages:
- - buffer
- - internal/bufferpool
- - internal/color
- - internal/exit
- - zapcore
-- name: golang.org/x/net
- version: aa69164e4478b84860dc6769c710c699c67058a3
- subpackages:
- - context
- - context/ctxhttp
-- name: golang.org/x/sys
- version: 0a153f010e6963173baba2306531d173aa843137
- subpackages:
- - windows
-- name: gopkg.in/yaml.v2
- version: 51d6538a90f86fe93ac480b35f37b2be17fef232
-- name: github.com/golang/mock
- version: 3a35fb6e3e18b9dbfee291262260dee7372d2a92
-testImports: []
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
deleted file mode 100644
index eb58c67ff..000000000
--- a/vendor/github.com/uber/jaeger-client-go/glide.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-package: github.com/uber/jaeger-client-go
-import:
-- package: github.com/opentracing/opentracing-go
- version: ^1.1
- subpackages:
- - ext
- - log
-- package: github.com/crossdock/crossdock-go
-- package: github.com/uber/jaeger-lib
- version: ^2.0.0
- subpackages:
- - metrics
-- package: github.com/pkg/errors
- version: ~0.8.0
-- package: go.uber.org/zap
- source: https://github.com/uber-go/zap.git
- version: ^1
-- package: github.com/uber-go/atomic
- version: ^1
-- package: github.com/prometheus/client_golang
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
- - require
- - suite
-- package: github.com/golang/mock
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
deleted file mode 100644
index 5da70351d..000000000
--- a/vendor/github.com/uber/jaeger-client-go/header.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// HeadersConfig contains the values for the header keys that Jaeger will use.
-// These values may be either custom or default depending on whether custom
-// values were provided via a configuration.
-type HeadersConfig struct {
- // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
- // if found in the carrier, forces the trace to be sampled as "debug" trace.
- // The value of the header is recorded as the tag on the root span, so that the
- // trace can be found in the UI using this value as a correlation ID.
- JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
-
- // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
- // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
- // a root span does not exist.
- JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
-
- // TraceContextHeaderName is the http header name used to propagate tracing context.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
-
- // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
-}
-
-// ApplyDefaults sets missing configuration keys to default values
-func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
- if c.JaegerBaggageHeader == "" {
- c.JaegerBaggageHeader = JaegerBaggageHeader
- }
- if c.JaegerDebugHeader == "" {
- c.JaegerDebugHeader = JaegerDebugHeader
- }
- if c.TraceBaggageHeaderPrefix == "" {
- c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
- }
- if c.TraceContextHeaderName == "" {
- c.TraceContextHeaderName = TraceContextHeaderName
- }
- return c
-}
-
-func getDefaultHeadersConfig() *HeadersConfig {
- return &HeadersConfig{
- JaegerDebugHeader: JaegerDebugHeader,
- JaegerBaggageHeader: JaegerBaggageHeader,
- TraceContextHeaderName: TraceContextHeaderName,
- TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
- }
-}
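
A sketch (not part of this diff) of overriding one header while letting ApplyDefaults fill the rest; the custom header name is hypothetical, and passing the struct through the config package's Headers field is an assumption about how it is consumed.

package tracing

import (
	"github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// newConfigWithCustomHeader propagates the trace context under a custom
// (hypothetical) header name and leaves the other keys at their defaults.
func newConfigWithCustomHeader() *jaegercfg.Configuration {
	headers := &jaeger.HeadersConfig{
		TraceContextHeaderName: "x-podman-trace-id", // must stay lower-case
	}
	// Fills JaegerDebugHeader, JaegerBaggageHeader, TraceBaggageHeaderPrefix.
	headers = headers.ApplyDefaults()

	return &jaegercfg.Configuration{
		ServiceName: "podman",
		Headers:     headers,
	}
}
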
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
deleted file mode 100644
index 745729319..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- defaultMaxValueLength = 2048
- defaultRefreshInterval = time.Minute
- defaultHostPort = "localhost:5778"
-)
-
-// Option is a function that sets some option on the RestrictionManager
-type Option func(options *options)
-
-// Options is a factory for all available options
-var Options options
-
-type options struct {
- denyBaggageOnInitializationFailure bool
- metrics *jaeger.Metrics
- logger jaeger.Logger
- hostPort string
- refreshInterval time.Duration
-}
-
-// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
-// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
-// restrictions have been retrieved from agent.
-// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
-// restrictions have been retrieved from agent.
-func (options) DenyBaggageOnInitializationFailure(b bool) Option {
- return func(o *options) {
- o.denyBaggageOnInitializationFailure = b
- }
-}
-
-// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
-func (options) Metrics(m *jaeger.Metrics) Option {
- return func(o *options) {
- o.metrics = m
- }
-}
-
-// Logger creates an Option that sets the logger used by the RestrictionManager.
-func (options) Logger(logger jaeger.Logger) Option {
- return func(o *options) {
- o.logger = logger
- }
-}
-
-// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
-func (options) HostPort(hostPort string) Option {
- return func(o *options) {
- o.hostPort = hostPort
- }
-}
-
-// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
-// the baggage restrictions.
-func (options) RefreshInterval(refreshInterval time.Duration) Option {
- return func(o *options) {
- o.refreshInterval = refreshInterval
- }
-}
-
-func applyOptions(o ...Option) options {
- opts := options{}
- for _, option := range o {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = jaeger.NewNullMetrics()
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- if opts.hostPort == "" {
- opts.hostPort = defaultHostPort
- }
- if opts.refreshInterval == 0 {
- opts.refreshInterval = defaultRefreshInterval
- }
- return opts
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
deleted file mode 100644
index a56515aca..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "fmt"
- "net/url"
- "sync"
- "time"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-type httpBaggageRestrictionManagerProxy struct {
- url string
-}
-
-func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
- v := url.Values{}
- v.Set("service", serviceName)
- return &httpBaggageRestrictionManagerProxy{
- url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
- }
-}
-
-func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
- var out []*thrift.BaggageRestriction
- if err := utils.GetJSON(s.url, &out); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
-type RestrictionManager struct {
- options
-
- mux sync.RWMutex
- serviceName string
- restrictions map[string]*baggage.Restriction
- thriftProxy thrift.BaggageRestrictionManager
- pollStopped sync.WaitGroup
- stopPoll chan struct{}
- invalidRestriction *baggage.Restriction
- validRestriction *baggage.Restriction
-
- // Determines if the manager has successfully retrieved baggage restrictions from agent
- initialized bool
-}
-
-// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
-// baggage restrictions.
-func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
- // TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
- // restrictionsMap will need to exist per service
- opts := applyOptions(options...)
- m := &RestrictionManager{
- serviceName: serviceName,
- options: opts,
- restrictions: make(map[string]*baggage.Restriction),
- thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
- stopPoll: make(chan struct{}),
- invalidRestriction: baggage.NewRestriction(false, 0),
- validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
- }
- m.pollStopped.Add(1)
- go m.pollManager()
- return m
-}
-
-// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
-func (m *RestrictionManager) isReady() bool {
- m.mux.RLock()
- defer m.mux.RUnlock()
- return m.initialized
-}
-
-// GetRestriction implements RestrictionManager#GetRestriction.
-func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
- m.mux.RLock()
- defer m.mux.RUnlock()
- if !m.initialized {
- if m.denyBaggageOnInitializationFailure {
- return m.invalidRestriction
- }
- return m.validRestriction
- }
- if restriction, ok := m.restrictions[key]; ok {
- return restriction
- }
- return m.invalidRestriction
-}
-
-// Close stops remote polling and closes the RemoteRestrictionManager.
-func (m *RestrictionManager) Close() error {
- close(m.stopPoll)
- m.pollStopped.Wait()
- return nil
-}
-
-func (m *RestrictionManager) pollManager() {
- defer m.pollStopped.Done()
- // attempt to initialize baggage restrictions
- if err := m.updateRestrictions(); err != nil {
- m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
- }
- ticker := time.NewTicker(m.refreshInterval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := m.updateRestrictions(); err != nil {
- m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
- }
- case <-m.stopPoll:
- return
- }
- }
-}
-
-func (m *RestrictionManager) updateRestrictions() error {
- restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
- if err != nil {
- m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
- return err
- }
- newRestrictions := m.parseRestrictions(restrictions)
- m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
- m.mux.Lock()
- defer m.mux.Unlock()
- m.initialized = true
- m.restrictions = newRestrictions
- return nil
-}
-
-func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
- setters := make(map[string]*baggage.Restriction, len(restrictions))
- for _, restriction := range restrictions {
- setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
- }
- return setters
-}
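
Because the package above lives under internal/, it cannot be imported from other modules; the sketch below (not part of this diff) is written as if it sat next to restriction_manager.go, e.g. in a test file, and the agent address and baggage key are assumptions.

package remote

import "testing"

func TestRestrictionManagerSketch(t *testing.T) {
	mgr := NewRestrictionManager("podman",
		Options.HostPort("127.0.0.1:5778"), // defaultHostPort from options.go
		Options.DenyBaggageOnInitializationFailure(false),
	)
	defer mgr.Close()

	// Before the first successful poll of the agent, the permissive default
	// restriction applies because DenyBaggageOnInitializationFailure is false.
	r := mgr.GetRestriction("podman", "session-id")
	t.Logf("allowed=%v maxLen=%d", r.KeyAllowed(), r.MaxValueLength())
}
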
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
deleted file mode 100644
index c16a5c566..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package baggage
-
-const (
- defaultMaxValueLength = 2048
-)
-
-// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
-type Restriction struct {
- keyAllowed bool
- maxValueLength int
-}
-
-// NewRestriction returns a new Restriction.
-func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
- return &Restriction{
- keyAllowed: keyAllowed,
- maxValueLength: maxValueLength,
- }
-}
-
-// KeyAllowed returns whether the baggage key for this restriction is allowed.
-func (r *Restriction) KeyAllowed() bool {
- return r.keyAllowed
-}
-
-// MaxValueLength returns the max length for the baggage value.
-func (r *Restriction) MaxValueLength() int {
- return r.maxValueLength
-}
-
-// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
-// will return a Restriction for a specific baggage key which will determine whether the baggage
-// key is allowed for the current service and any other applicable restrictions on the baggage
-// value.
-type RestrictionManager interface {
- GetRestriction(service, key string) *Restriction
-}
-
-// DefaultRestrictionManager allows any baggage key.
-type DefaultRestrictionManager struct {
- defaultRestriction *Restriction
-}
-
-// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
-func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
- if maxValueLength == 0 {
- maxValueLength = defaultMaxValueLength
- }
- return &DefaultRestrictionManager{
- defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
- }
-}
-
-// GetRestriction implements RestrictionManager#GetRestriction.
-func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
- return m.defaultRestriction
-}
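
A small sketch (not part of this diff) of the permissive default manager above, again written as in-package test code since the package is internal.

package baggage

import "testing"

func TestDefaultRestrictionManagerSketch(t *testing.T) {
	m := NewDefaultRestrictionManager(0) // 0 falls back to defaultMaxValueLength (2048)
	r := m.GetRestriction("any-service", "any-key")
	if !r.KeyAllowed() || r.MaxValueLength() != 2048 {
		t.Fatalf("unexpected restriction: %v %d", r.KeyAllowed(), r.MaxValueLength())
	}
}
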
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
deleted file mode 100644
index fe0bef268..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2020 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reporterstats
-
-// ReporterStats exposes some metrics from the RemoteReporter.
-type ReporterStats interface {
- SpansDroppedFromQueue() int64
-}
-
-// Receiver can be implemented by a Transport to be given ReporterStats.
-type Receiver interface {
- SetReporterStats(ReporterStats)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
deleted file mode 100644
index 0e10b8a5a..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanlog
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opentracing/opentracing-go/log"
-)
-
-type fieldsAsMap map[string]string
-
-// MaterializeWithJSON converts log Fields into JSON string
-// TODO refactor into pluggable materializer
-func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
- fields := fieldsAsMap(make(map[string]string, len(logFields)))
- for _, field := range logFields {
- field.Marshal(fields)
- }
- if event, ok := fields["event"]; ok && len(fields) == 1 {
- return []byte(event), nil
- }
- return json.Marshal(fields)
-}
-
-func (ml fieldsAsMap) EmitString(key, value string) {
- ml[key] = value
-}
-
-func (ml fieldsAsMap) EmitBool(key string, value bool) {
- ml[key] = fmt.Sprintf("%t", value)
-}
-
-func (ml fieldsAsMap) EmitInt(key string, value int) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt32(key string, value int32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt64(key string, value int64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
- ml[key] = fmt.Sprintf("%+v", value)
-}
-
-func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
- value(ml)
-}
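
An in-package sketch (not part of this diff) of the two behaviours of MaterializeWithJSON above: a lone "event" field is returned verbatim, anything else becomes a JSON object of stringified values.

package spanlog

import (
	"testing"

	"github.com/opentracing/opentracing-go/log"
)

func TestMaterializeWithJSONSketch(t *testing.T) {
	// A single "event" field short-circuits to the raw value.
	b, err := MaterializeWithJSON([]log.Field{log.String("event", "retry")})
	if err != nil || string(b) != "retry" {
		t.Fatalf("got %q, %v", b, err)
	}

	// Otherwise every field is rendered into a JSON object of strings.
	b, err = MaterializeWithJSON([]log.Field{
		log.String("event", "retry"),
		log.Int("attempt", 3),
	})
	if err != nil || string(b) != `{"attempt":"3","event":"retry"}` {
		t.Fatalf("got %q, %v", b, err)
	}
}
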
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
deleted file mode 100644
index f52c322fb..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- defaultHostPort = "localhost:5778"
- defaultRefreshInterval = time.Second * 5
-)
-
-// Option is a function that sets some option on the Throttler
-type Option func(options *options)
-
-// Options is a factory for all available options
-var Options options
-
-type options struct {
- metrics *jaeger.Metrics
- logger jaeger.Logger
- hostPort string
- refreshInterval time.Duration
- synchronousInitialization bool
-}
-
-// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
-func (options) Metrics(m *jaeger.Metrics) Option {
- return func(o *options) {
- o.metrics = m
- }
-}
-
-// Logger creates an Option that sets the logger used by the Throttler.
-func (options) Logger(logger jaeger.Logger) Option {
- return func(o *options) {
- o.logger = logger
- }
-}
-
-// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
-func (options) HostPort(hostPort string) Option {
- return func(o *options) {
- o.hostPort = hostPort
- }
-}
-
-// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
-// credits.
-func (options) RefreshInterval(refreshInterval time.Duration) Option {
- return func(o *options) {
- o.refreshInterval = refreshInterval
- }
-}
-
-// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
-// fetch credits from the agent when an operation is seen for the first time. This should be set to true
-// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
-// such that sampling or throttling occurs.
-func (options) SynchronousInitialization(b bool) Option {
- return func(o *options) {
- o.synchronousInitialization = b
- }
-}
-
-func applyOptions(o ...Option) options {
- opts := options{}
- for _, option := range o {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = jaeger.NewNullMetrics()
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- if opts.hostPort == "" {
- opts.hostPort = defaultHostPort
- }
- if opts.refreshInterval == 0 {
- opts.refreshInterval = defaultRefreshInterval
- }
- return opts
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
deleted file mode 100644
index 20f434fe4..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "fmt"
- "net/url"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/uber/jaeger-client-go"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
- // minimumCredits is the minimum amount of credits necessary to not be throttled.
- // i.e. if currentCredits > minimumCredits, then the operation will not be throttled.
- minimumCredits = 1.0
-)
-
-var (
- errorUUIDNotSet = errors.New("Throttler UUID must be set")
-)
-
-type operationBalance struct {
- Operation string `json:"operation"`
- Balance float64 `json:"balance"`
-}
-
-type creditResponse struct {
- Balances []operationBalance `json:"balances"`
-}
-
-type httpCreditManagerProxy struct {
- hostPort string
-}
-
-func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
- return &httpCreditManagerProxy{
- hostPort: hostPort,
- }
-}
-
-// N.B. Operations list must not be empty.
-func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
- params := url.Values{}
- params.Set("service", serviceName)
- params.Set("uuid", uuid)
- for _, op := range operations {
- params.Add("operations", op)
- }
- var resp creditResponse
- if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
- return nil, errors.Wrap(err, "Failed to receive credits from agent")
- }
- return &resp, nil
-}
-
-// Throttler retrieves credits from agent and uses it to throttle operations.
-type Throttler struct {
- options
-
- mux sync.RWMutex
- service string
- uuid atomic.Value
- creditManager *httpCreditManagerProxy
- credits map[string]float64 // map of operation->credits
- close chan struct{}
- stopped sync.WaitGroup
-}
-
-// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
-// the service.
-func NewThrottler(service string, options ...Option) *Throttler {
- opts := applyOptions(options...)
- creditManager := newHTTPCreditManagerProxy(opts.hostPort)
- t := &Throttler{
- options: opts,
- creditManager: creditManager,
- service: service,
- credits: make(map[string]float64),
- close: make(chan struct{}),
- }
- t.stopped.Add(1)
- go t.pollManager()
- return t
-}
-
-// IsAllowed implements Throttler#IsAllowed.
-func (t *Throttler) IsAllowed(operation string) bool {
- t.mux.Lock()
- defer t.mux.Unlock()
- value, ok := t.credits[operation]
- if !ok || value == 0 {
- if !ok {
- // NOTE: This appears to be a no-op at first glance, but it stores
- // the operation key in the map. Necessary for functionality of
- // Throttler#operations method.
- t.credits[operation] = 0
- }
- if !t.synchronousInitialization {
- t.metrics.ThrottledDebugSpans.Inc(1)
- return false
- }
- // If it is the first time this operation is being checked, synchronously fetch
- // the credits.
- credits, err := t.fetchCredits([]string{operation})
- if err != nil {
- // Failed to receive credits from agent, try again next time
- t.logger.Error("Failed to fetch credits: " + err.Error())
- return false
- }
- if len(credits.Balances) == 0 {
- // This shouldn't happen but just in case
- return false
- }
- for _, opBalance := range credits.Balances {
- t.credits[opBalance.Operation] += opBalance.Balance
- }
- }
- return t.isAllowed(operation)
-}
-
-// Close stops the throttler from fetching credits from remote.
-func (t *Throttler) Close() error {
- close(t.close)
- t.stopped.Wait()
- return nil
-}
-
-// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
-// requests are made.
-func (t *Throttler) SetProcess(process jaeger.Process) {
- if process.UUID != "" {
- t.uuid.Store(process.UUID)
- }
-}
-
-// N.B. This function must be called with the Write Lock
-func (t *Throttler) isAllowed(operation string) bool {
- credits := t.credits[operation]
- if credits < minimumCredits {
- t.metrics.ThrottledDebugSpans.Inc(1)
- return false
- }
- t.credits[operation] = credits - minimumCredits
- return true
-}
-
-func (t *Throttler) pollManager() {
- defer t.stopped.Done()
- ticker := time.NewTicker(t.refreshInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- t.refreshCredits()
- case <-t.close:
- return
- }
- }
-}
-
-func (t *Throttler) operations() []string {
- t.mux.RLock()
- defer t.mux.RUnlock()
- operations := make([]string, 0, len(t.credits))
- for op := range t.credits {
- operations = append(operations, op)
- }
- return operations
-}
-
-func (t *Throttler) refreshCredits() {
- operations := t.operations()
- if len(operations) == 0 {
- return
- }
- newCredits, err := t.fetchCredits(operations)
- if err != nil {
- t.metrics.ThrottlerUpdateFailure.Inc(1)
- t.logger.Error("Failed to fetch credits: " + err.Error())
- return
- }
- t.metrics.ThrottlerUpdateSuccess.Inc(1)
-
- t.mux.Lock()
- defer t.mux.Unlock()
- for _, opBalance := range newCredits.Balances {
- t.credits[opBalance.Operation] += opBalance.Balance
- }
-}
-
-func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
- uuid := t.uuid.Load()
- uuidStr, _ := uuid.(string)
- if uuid == nil || uuidStr == "" {
- return nil, errorUUIDNotSet
- }
- return t.creditManager.FetchCredits(uuidStr, t.service, operations)
-}
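
An in-package sketch (not part of this diff) of driving the throttler above; the UUID is a placeholder and the agent address is simply the default assumed by options.go.

package remote

import (
	"testing"

	"github.com/uber/jaeger-client-go"
)

func TestThrottlerSketch(t *testing.T) {
	throttler := NewThrottler("podman",
		Options.HostPort("127.0.0.1:5778"), // agent credit endpoint (default)
		Options.SynchronousInitialization(true),
		Options.Logger(jaeger.StdLogger),
	)
	defer throttler.Close()

	// The UUID must be set before any credits can be fetched; this value is a
	// placeholder, not a real process UUID.
	throttler.SetProcess(jaeger.Process{Service: "podman", UUID: "00000000-0000-0000-0000-000000000000"})

	// With no agent running this returns false and StdLogger prints the fetch error.
	t.Logf("debug span allowed: %v", throttler.IsAllowed("pull-image"))
}
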
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
deleted file mode 100644
index 196ed69ca..000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package throttler
-
-// Throttler is used to rate-limit operations. For example, given how debug spans
-// are always sampled, a throttler can be enabled per client to rate limit the amount
-// of debug spans a client can start.
-type Throttler interface {
- // IsAllowed determines whether the operation should be allowed and not be
- // throttled.
- IsAllowed(operation string) bool
-}
-
-// DefaultThrottler doesn't throttle at all.
-type DefaultThrottler struct{}
-
-// IsAllowed implements Throttler#IsAllowed.
-func (t DefaultThrottler) IsAllowed(operation string) bool {
- return true
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
deleted file mode 100644
index 8402d087c..000000000
--- a/vendor/github.com/uber/jaeger-client-go/interop.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go"
-)
-
-// TODO this file should not be needed after TChannel PR.
-
-type formatKey int
-
-// SpanContextFormat is a constant used as OpenTracing Format.
-// Requires *SpanContext as carrier.
-// This format is intended for interop with TChannel or other Zipkin-like tracers.
-const SpanContextFormat formatKey = iota
-
-type jaegerTraceContextPropagator struct {
- tracer *Tracer
-}
-
-func (p *jaegerTraceContextPropagator) Inject(
- ctx SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(*SpanContext)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- carrier.CopyFrom(&ctx)
- return nil
-}
-
-func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(*SpanContext)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- ctx := new(SpanContext)
- ctx.CopyFrom(carrier)
- return *ctx, nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
deleted file mode 100644
index 868b2a5b5..000000000
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
-
- "github.com/opentracing/opentracing-go/log"
-
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
-)
-
-type tags []*j.Tag
-
-// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
-func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
- fields := tags(make([]*j.Tag, 0, len(logFields)))
- for _, field := range logFields {
- field.Marshal(&fields)
- }
- return fields
-}
-
-func (t *tags) EmitString(key, value string) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
-}
-
-func (t *tags) EmitBool(key string, value bool) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
-}
-
-func (t *tags) EmitInt(key string, value int) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitInt32(key string, value int32) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitInt64(key string, value int64) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
-}
-
-func (t *tags) EmitUint32(key string, value uint32) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitUint64(key string, value uint64) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitFloat32(key string, value float32) {
- vDouble := float64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
-}
-
-func (t *tags) EmitFloat64(key string, value float64) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
-}
-
-func (t *tags) EmitObject(key string, value interface{}) {
- vStr := fmt.Sprintf("%+v", value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
-}
-
-func (t *tags) EmitLazyLogger(value log.LazyLogger) {
- value(t)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
deleted file mode 100644
index 3ac2f8f94..000000000
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// BuildJaegerThrift builds jaeger span based on internal span.
-// TODO: (breaking change) move to internal package.
-func BuildJaegerThrift(span *Span) *j.Span {
- span.Lock()
- defer span.Unlock()
- startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
- duration := span.duration.Nanoseconds() / int64(time.Microsecond)
- jaegerSpan := &j.Span{
- TraceIdLow: int64(span.context.traceID.Low),
- TraceIdHigh: int64(span.context.traceID.High),
- SpanId: int64(span.context.spanID),
- ParentSpanId: int64(span.context.parentID),
- OperationName: span.operationName,
- Flags: int32(span.context.samplingState.flags()),
- StartTime: startTime,
- Duration: duration,
- Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
- Logs: buildLogs(span.logs),
- References: buildReferences(span.references),
- }
- return jaegerSpan
-}
-
-// BuildJaegerProcessThrift creates a thrift Process type.
-// TODO: (breaking change) move to internal package.
-func BuildJaegerProcessThrift(span *Span) *j.Process {
- span.Lock()
- defer span.Unlock()
- return buildJaegerProcessThrift(span.tracer)
-}
-
-func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
- process := &j.Process{
- ServiceName: tracer.serviceName,
- Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
- }
- if tracer.process.UUID != "" {
- process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
- }
- return process
-}
-
-func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
- jTags := make([]*j.Tag, 0, len(tags))
- for _, tag := range tags {
- jTag := buildTag(&tag, maxTagValueLength)
- jTags = append(jTags, jTag)
- }
- return jTags
-}
-
-func buildLogs(logs []opentracing.LogRecord) []*j.Log {
- jLogs := make([]*j.Log, 0, len(logs))
- for _, log := range logs {
- jLog := &j.Log{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
- Fields: ConvertLogsToJaegerTags(log.Fields),
- }
- jLogs = append(jLogs, jLog)
- }
- return jLogs
-}
-
-func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
- jTag := &j.Tag{Key: tag.key}
- switch value := tag.value.(type) {
- case string:
- vStr := truncateString(value, maxTagValueLength)
- jTag.VStr = &vStr
- jTag.VType = j.TagType_STRING
- case []byte:
- if len(value) > maxTagValueLength {
- value = value[:maxTagValueLength]
- }
- jTag.VBinary = value
- jTag.VType = j.TagType_BINARY
- case int:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int8:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint8:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int16:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint16:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int32:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint32:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int64:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint64:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case float32:
- vDouble := float64(value)
- jTag.VDouble = &vDouble
- jTag.VType = j.TagType_DOUBLE
- case float64:
- vDouble := float64(value)
- jTag.VDouble = &vDouble
- jTag.VType = j.TagType_DOUBLE
- case bool:
- vBool := value
- jTag.VBool = &vBool
- jTag.VType = j.TagType_BOOL
- default:
- vStr := truncateString(stringify(value), maxTagValueLength)
- jTag.VStr = &vStr
- jTag.VType = j.TagType_STRING
- }
- return jTag
-}
-
-func buildReferences(references []Reference) []*j.SpanRef {
- retMe := make([]*j.SpanRef, 0, len(references))
- for _, ref := range references {
- if ref.Type == opentracing.ChildOfRef {
- retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
- } else if ref.Type == opentracing.FollowsFromRef {
- retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
- }
- }
- return retMe
-}
-
-func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
- return &j.SpanRef{
- RefType: refType,
- TraceIdLow: int64(ctx.traceID.Low),
- TraceIdHigh: int64(ctx.traceID.High),
- SpanId: int64(ctx.spanID),
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
deleted file mode 100644
index d4f0b5019..000000000
--- a/vendor/github.com/uber/jaeger-client-go/logger.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "log"
-
-// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
-
-// Logger provides an abstract interface for logging from Reporters.
-// Applications can provide their own implementation of this interface to adapt
-// reporters logging to whatever logging library they prefer (stdlib log,
-// logrus, go-logging, etc).
-type Logger interface {
- // Error logs a message at error priority
- Error(msg string)
-
- // Infof logs a message at info priority
- Infof(msg string, args ...interface{})
-}
-
-// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
-var StdLogger = &stdLogger{}
-
-type stdLogger struct{}
-
-func (l *stdLogger) Error(msg string) {
- log.Printf("ERROR: %s", msg)
-}
-
-// Infof logs a message at info priority
-func (l *stdLogger) Infof(msg string, args ...interface{}) {
- log.Printf(msg, args...)
-}
-
-// NullLogger is an implementation of the Logger interface that discards all messages
-var NullLogger = &nullLogger{}
-
-type nullLogger struct{}
-
-func (l *nullLogger) Error(msg string) {}
-func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
deleted file mode 100644
index 50e4e22d6..000000000
--- a/vendor/github.com/uber/jaeger-client-go/metrics.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/uber/jaeger-lib/metrics"
-)
-
-// Metrics is a container of all stats emitted by Jaeger tracer.
-type Metrics struct {
- // Number of traces started by this tracer as sampled
- TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
-
- // Number of traces started by this tracer as not sampled
- TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
-
- // Number of traces started by this tracer with delayed sampling
- TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
-
- // Number of externally started sampled traces this tracer joined
- TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
-
- // Number of externally started not-sampled traces this tracer joined
- TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
-
- // Number of sampled spans started by this tracer
- SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
-
- // Number of not sampled spans started by this tracer
- SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
-
- // Number of spans with delayed sampling started by this tracer
- SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
-
- // Number of spans finished by this tracer
- SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
-
- // Number of spans finished by this tracer
- SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
-
- // Number of spans finished by this tracer
- SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
-
- // Number of errors decoding tracing context
- DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
-
- // Number of spans successfully reported
- ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
-
- // Number of spans not reported due to a Sender failure
- ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
-
- // Number of spans dropped due to internal queue overflow
- ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
-
- // Current number of spans in the reporter queue
- ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
-
- // Number of times the Sampler succeeded to retrieve sampling strategy
- SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
-
- // Number of times the Sampler failed to retrieve sampling strategy
- SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
-
- // Number of times the Sampler succeeded to retrieve and update sampling strategy
- SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
-
- // Number of times the Sampler failed to update sampling strategy
- SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
-
- // Number of times baggage was successfully written or updated on spans.
- BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
-
- // Number of times baggage failed to write or update on spans.
- BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
-
- // Number of times baggage was truncated as per baggage restrictions.
- BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
-
- // Number of times baggage restrictions were successfully updated.
- BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
-
- // Number of times baggage restrictions failed to update.
- BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
-
- // Number of times debug spans were throttled.
- ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
-
- // Number of times throttler successfully updated.
- ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
-
- // Number of times throttler failed to update.
- ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
-}
-
-// NewMetrics creates a new Metrics struct and initializes it.
-func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
- m := &Metrics{}
- // TODO the namespace "jaeger" should be configurable
- metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
- return m
-}
-
-// NewNullMetrics creates a new Metrics struct that won't report any metrics.
-func NewNullMetrics() *Metrics {
- return NewMetrics(metrics.NullFactory, nil)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
deleted file mode 100644
index 7bbd02889..000000000
--- a/vendor/github.com/uber/jaeger-client-go/observer.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import opentracing "github.com/opentracing/opentracing-go"
-
-// Observer can be registered with the Tracer to receive notifications about
-// new Spans.
-//
-// Deprecated: use jaeger.ContribObserver instead.
-type Observer interface {
- OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
-}
-
-// SpanObserver is created by the Observer and receives notifications about
-// other Span events.
-//
-// Deprecated: use jaeger.ContribSpanObserver instead.
-type SpanObserver interface {
- OnSetOperationName(operationName string)
- OnSetTag(key string, value interface{})
- OnFinish(options opentracing.FinishOptions)
-}
-
-// compositeObserver is a dispatcher to other observers
-type compositeObserver struct {
- observers []ContribObserver
-}
-
-// compositeSpanObserver is a dispatcher to other span observers
-type compositeSpanObserver struct {
- observers []ContribSpanObserver
-}
-
-// noopSpanObserver is used when there are no observers registered
-// on the Tracer or none of them returns span observers from OnStartSpan.
-var noopSpanObserver = &compositeSpanObserver{}
-
-func (o *compositeObserver) append(contribObserver ContribObserver) {
- o.observers = append(o.observers, contribObserver)
-}
-
-func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
- var spanObservers []ContribSpanObserver
- for _, obs := range o.observers {
- spanObs, ok := obs.OnStartSpan(sp, operationName, options)
- if ok {
- if spanObservers == nil {
- spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
- }
- spanObservers = append(spanObservers, spanObs)
- }
- }
- if len(spanObservers) == 0 {
- return noopSpanObserver
- }
- return &compositeSpanObserver{observers: spanObservers}
-}
-
-func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
- for _, obs := range o.observers {
- obs.OnSetOperationName(operationName)
- }
-}
-
-func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
- for _, obs := range o.observers {
- obs.OnSetTag(key, value)
- }
-}
-
-func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
- for _, obs := range o.observers {
- obs.OnFinish(options)
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
deleted file mode 100644
index 30cbf9962..000000000
--- a/vendor/github.com/uber/jaeger-client-go/process.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// Process holds process specific metadata that's relevant to this client.
-type Process struct {
- Service string
- UUID string
- Tags []Tag
-}
-
-// ProcessSetter sets a process. This can be used by any class that requires
-// the process to be set as part of initialization.
-// See internal/throttler/remote/throttler.go for an example.
-type ProcessSetter interface {
- SetProcess(process Process)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
deleted file mode 100644
index 42fd64b58..000000000
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "log"
- "net/url"
- "strings"
- "sync"
-
- opentracing "github.com/opentracing/opentracing-go"
-)
-
-// Injector is responsible for injecting SpanContext instances in a manner suitable
-// for propagation via a format-specific "carrier" object. Typically the
-// injection will take place across an RPC boundary, but message queues and
-// other IPC mechanisms are also reasonable places to use an Injector.
-type Injector interface {
- // Inject takes `SpanContext` and injects it into `carrier`. The actual type
- // of `carrier` depends on the `format` passed to `Tracer.Inject()`.
- //
- // Implementations may return opentracing.ErrInvalidCarrier or any other
- // implementation-specific error if injection fails.
- Inject(ctx SpanContext, carrier interface{}) error
-}
-
-// Extractor is responsible for extracting SpanContext instances from a
-// format-specific "carrier" object. Typically the extraction will take place
-// on the server side of an RPC boundary, but message queues and other IPC
-// mechanisms are also reasonable places to use an Extractor.
-type Extractor interface {
- // Extract decodes a SpanContext instance from the given `carrier`,
- // or (nil, opentracing.ErrSpanContextNotFound) if no context could
- // be found in the `carrier`.
- Extract(carrier interface{}) (SpanContext, error)
-}
-
-// TextMapPropagator is a combined Injector and Extractor for TextMap format
-type TextMapPropagator struct {
- headerKeys *HeadersConfig
- metrics Metrics
- encodeValue func(string) string
- decodeValue func(string) string
-}
-
-// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
-func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
- return &TextMapPropagator{
- headerKeys: headerKeys,
- metrics: metrics,
- encodeValue: func(val string) string {
- return val
- },
- decodeValue: func(val string) string {
- return val
- },
- }
-}
-
-// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
-func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
- return &TextMapPropagator{
- headerKeys: headerKeys,
- metrics: metrics,
- encodeValue: func(val string) string {
- return url.QueryEscape(val)
- },
- decodeValue: func(val string) string {
- // ignore decoding errors, cannot do anything about them
- if v, err := url.QueryUnescape(val); err == nil {
- return v
- }
- return val
- },
- }
-}
-
-// BinaryPropagator is a combined Injector and Extractor for Binary format
-type BinaryPropagator struct {
- tracer *Tracer
- buffers sync.Pool
-}
-
-// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
-func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
- return &BinaryPropagator{
- tracer: tracer,
- buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
- }
-}
-
-// Inject implements Injector of TextMapPropagator
-func (p *TextMapPropagator) Inject(
- sc SpanContext,
- abstractCarrier interface{},
-) error {
- textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- // Do not encode the string with trace context to avoid accidental double-encoding
- // if people are using opentracing < 0.10.0. Our colon-separated representation
- // of the trace context is already safe for HTTP headers.
- textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
- for k, v := range sc.baggage {
- safeKey := p.addBaggageKeyPrefix(k)
- safeVal := p.encodeValue(v)
- textMapWriter.Set(safeKey, safeVal)
- }
- return nil
-}
-
-// Extract implements Extractor of TextMapPropagator
-func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- var ctx SpanContext
- var baggage map[string]string
- err := textMapReader.ForeachKey(func(rawKey, value string) error {
- key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
- if key == p.headerKeys.TraceContextHeaderName {
- var err error
- safeVal := p.decodeValue(value)
- if ctx, err = ContextFromString(safeVal); err != nil {
- return err
- }
- } else if key == p.headerKeys.JaegerDebugHeader {
- ctx.debugID = p.decodeValue(value)
- } else if key == p.headerKeys.JaegerBaggageHeader {
- if baggage == nil {
- baggage = make(map[string]string)
- }
- for k, v := range p.parseCommaSeparatedMap(value) {
- baggage[k] = v
- }
- } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
- if baggage == nil {
- baggage = make(map[string]string)
- }
- safeKey := p.removeBaggageKeyPrefix(key)
- safeVal := p.decodeValue(value)
- baggage[safeKey] = safeVal
- }
- return nil
- })
- if err != nil {
- p.metrics.DecodingErrors.Inc(1)
- return emptyContext, err
- }
- if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
- return emptyContext, opentracing.ErrSpanContextNotFound
- }
- ctx.baggage = baggage
- return ctx, nil
-}
-
-// Inject implements Injector of BinaryPropagator
-func (p *BinaryPropagator) Inject(
- sc SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(io.Writer)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- // Handle the tracer context
- if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
- return err
- }
-
- // Handle the baggage items
- if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
- return err
- }
- for k, v := range sc.baggage {
- if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
- return err
- }
- io.WriteString(carrier, k)
- if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
- return err
- }
- io.WriteString(carrier, v)
- }
-
- return nil
-}
-
-// Extract implements Extractor of BinaryPropagator
-func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(io.Reader)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- var ctx SpanContext
- ctx.samplingState = &samplingState{}
-
- if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
-
- var flags byte
- if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- ctx.samplingState.setFlags(flags)
-
- // Handle the baggage items
- var numBaggage int32
- if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
- ctx.baggage = make(map[string]string, iNumBaggage)
- buf := p.buffers.Get().(*bytes.Buffer)
- defer p.buffers.Put(buf)
-
- var keyLen, valLen int32
- for i := 0; i < iNumBaggage; i++ {
- if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- buf.Reset()
- buf.Grow(int(keyLen))
- if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- key := buf.String()
-
- if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- buf.Reset()
- buf.Grow(int(valLen))
- if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- ctx.baggage[key] = buf.String()
- }
- }
-
- return ctx, nil
-}
-
-// Converts a comma separated key value pair list into a map
-// e.g. key1=value1, key2=value2, key3 = value3
-// is converted to map[string]string { "key1" : "value1",
-// "key2" : "value2",
-// "key3" : "value3" }
-func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
- baggage := make(map[string]string)
- value, err := url.QueryUnescape(value)
- if err != nil {
- log.Printf("Unable to unescape %s, %v", value, err)
- return baggage
- }
- for _, kvpair := range strings.Split(value, ",") {
- kv := strings.Split(strings.TrimSpace(kvpair), "=")
- if len(kv) == 2 {
- baggage[kv[0]] = kv[1]
- } else {
- log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
- }
- }
- return baggage
-}
-
-// Converts a baggage item key into an http header format,
-// by prepending TraceBaggageHeaderPrefix and encoding the key string
-func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
- // TODO encodeBaggageKeyAsHeader add caching and escaping
- return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
-}
-
-func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
- // TODO decodeBaggageHeaderKey add caching and escaping
- return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
deleted file mode 100644
index 5646e78bb..000000000
--- a/vendor/github.com/uber/jaeger-client-go/reference.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "github.com/opentracing/opentracing-go"
-
-// Reference represents a causal reference to other Spans (via their SpanContext).
-type Reference struct {
- Type opentracing.SpanReferenceType
- Context SpanContext
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
deleted file mode 100644
index a71a92c3e..000000000
--- a/vendor/github.com/uber/jaeger-client-go/reporter.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- "github.com/uber/jaeger-client-go/internal/reporterstats"
- "github.com/uber/jaeger-client-go/log"
-)
-
-// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
-type Reporter interface {
- // Report submits a new span to collectors, possibly asynchronously and/or with buffering.
- // If the reporter is processing Span asynchronously then it needs to Retain() the span,
- // and then Release() it when no longer needed, to avoid span data corruption.
- Report(span *Span)
-
- // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
- Close()
-}
-
-// ------------------------------
-
-type nullReporter struct{}
-
-// NewNullReporter creates a no-op reporter that ignores all reported spans.
-func NewNullReporter() Reporter {
- return &nullReporter{}
-}
-
-// Report implements Report() method of Reporter by doing nothing.
-func (r *nullReporter) Report(span *Span) {
- // no-op
-}
-
-// Close implements Close() method of Reporter by doing nothing.
-func (r *nullReporter) Close() {
- // no-op
-}
-
-// ------------------------------
-
-type loggingReporter struct {
- logger Logger
-}
-
-// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
-func NewLoggingReporter(logger Logger) Reporter {
- return &loggingReporter{logger}
-}
-
-// Report implements Report() method of Reporter by logging the span to the logger.
-func (r *loggingReporter) Report(span *Span) {
- r.logger.Infof("Reporting span %+v", span)
-}
-
-// Close implements Close() method of Reporter by doing nothing.
-func (r *loggingReporter) Close() {
- // no-op
-}
-
-// ------------------------------
-
-// InMemoryReporter is used for testing, and simply collects spans in memory.
-type InMemoryReporter struct {
- spans []opentracing.Span
- lock sync.Mutex
-}
-
-// NewInMemoryReporter creates a reporter that stores spans in memory.
-// NOTE: the Tracer should be created with options.PoolSpans = false.
-func NewInMemoryReporter() *InMemoryReporter {
- return &InMemoryReporter{
- spans: make([]opentracing.Span, 0, 10),
- }
-}
-
-// Report implements Report() method of Reporter by storing the span in the buffer.
-func (r *InMemoryReporter) Report(span *Span) {
- r.lock.Lock()
- // Need to retain the span otherwise it will be released
- r.spans = append(r.spans, span.Retain())
- r.lock.Unlock()
-}
-
-// Close implements Close() method of Reporter
-func (r *InMemoryReporter) Close() {
- r.Reset()
-}
-
-// SpansSubmitted returns the number of spans accumulated in the buffer.
-func (r *InMemoryReporter) SpansSubmitted() int {
- r.lock.Lock()
- defer r.lock.Unlock()
- return len(r.spans)
-}
-
-// GetSpans returns accumulated spans as a copy of the buffer.
-func (r *InMemoryReporter) GetSpans() []opentracing.Span {
- r.lock.Lock()
- defer r.lock.Unlock()
- copied := make([]opentracing.Span, len(r.spans))
- copy(copied, r.spans)
- return copied
-}
-
-// Reset clears all accumulated spans.
-func (r *InMemoryReporter) Reset() {
- r.lock.Lock()
- defer r.lock.Unlock()
-
- // Before reset the collection need to release Span memory
- for _, span := range r.spans {
- span.(*Span).Release()
- }
- r.spans = r.spans[:0]
-}
-
-// ------------------------------
-
-type compositeReporter struct {
- reporters []Reporter
-}
-
-// NewCompositeReporter creates a reporter that delegates to each of the given reporters.
-func NewCompositeReporter(reporters ...Reporter) Reporter {
- return &compositeReporter{reporters: reporters}
-}
-
-// Report implements Report() method of Reporter by delegating to each underlying reporter.
-func (r *compositeReporter) Report(span *Span) {
- for _, reporter := range r.reporters {
- reporter.Report(span)
- }
-}
-
-// Close implements Close() method of Reporter by closing each underlying reporter.
-func (r *compositeReporter) Close() {
- for _, reporter := range r.reporters {
- reporter.Close()
- }
-}
-
-// ------------- REMOTE REPORTER -----------------
-
-type reporterQueueItemType int
-
-const (
- defaultQueueSize = 100
- defaultBufferFlushInterval = 1 * time.Second
-
- reporterQueueItemSpan reporterQueueItemType = iota
- reporterQueueItemClose
-)
-
-type reporterQueueItem struct {
- itemType reporterQueueItemType
- span *Span
- close *sync.WaitGroup
-}
-
-// reporterStats implements reporterstats.ReporterStats.
-type reporterStats struct {
- droppedCount int64 // provided to Transports to report data loss to the backend
-}
-
-// SpansDroppedFromQueue implements reporterstats.ReporterStats.
-func (r *reporterStats) SpansDroppedFromQueue() int64 {
- return atomic.LoadInt64(&r.droppedCount)
-}
-
-func (r *reporterStats) incDroppedCount() {
- atomic.AddInt64(&r.droppedCount, 1)
-}
-
-type remoteReporter struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- queueLength int64 // used to update metrics.Gauge
- closed int64 // 0 - not closed, 1 - closed
-
- reporterOptions
-
- sender Transport
- queue chan reporterQueueItem
- reporterStats *reporterStats
-}
-
-// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
-// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
-// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
-// Calls to Close() block until all spans reported prior to the call to Close are flushed.
-func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
- options := reporterOptions{}
- for _, option := range opts {
- option(&options)
- }
- if options.bufferFlushInterval <= 0 {
- options.bufferFlushInterval = defaultBufferFlushInterval
- }
- if options.logger == nil {
- options.logger = log.NullLogger
- }
- if options.metrics == nil {
- options.metrics = NewNullMetrics()
- }
- if options.queueSize <= 0 {
- options.queueSize = defaultQueueSize
- }
- reporter := &remoteReporter{
- reporterOptions: options,
- sender: sender,
- queue: make(chan reporterQueueItem, options.queueSize),
- reporterStats: new(reporterStats),
- }
- if receiver, ok := sender.(reporterstats.Receiver); ok {
- receiver.SetReporterStats(reporter.reporterStats)
- }
- go reporter.processQueue()
- return reporter
-}
-
-// Report implements Report() method of Reporter.
-// It passes the span to a background go-routine for submission to Jaeger backend.
-// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
-// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
-// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
-// because some of them may still be successfully added to the queue.
-func (r *remoteReporter) Report(span *Span) {
- select {
- // Need to retain the span otherwise it will be released
- case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
- atomic.AddInt64(&r.queueLength, 1)
- default:
- r.metrics.ReporterDropped.Inc(1)
- r.reporterStats.incDroppedCount()
- }
-}
-
-// Close implements Close() method of Reporter by waiting for the queue to be drained.
-func (r *remoteReporter) Close() {
- r.logger.Debugf("closing reporter")
- if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
- r.logger.Error("Repeated attempt to close the reporter is ignored")
- return
- }
- r.sendCloseEvent()
- _ = r.sender.Close()
-}
-
-func (r *remoteReporter) sendCloseEvent() {
- wg := &sync.WaitGroup{}
- wg.Add(1)
- item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
-
- r.queue <- item // if the queue is full we will block until there is space
- atomic.AddInt64(&r.queueLength, 1)
- wg.Wait()
-}
-
-// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
-// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger.
-// Buffer also gets flushed automatically every batchFlushInterval seconds, just in case the tracer stopped
-// reporting new spans.
-func (r *remoteReporter) processQueue() {
- // flush causes the Sender to flush its accumulated spans and clear the buffer
- flush := func() {
- if flushed, err := r.sender.Flush(); err != nil {
- r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
- } else if flushed > 0 {
- r.metrics.ReporterSuccess.Inc(int64(flushed))
- }
- }
-
- timer := time.NewTicker(r.bufferFlushInterval)
- for {
- select {
- case <-timer.C:
- flush()
- case item := <-r.queue:
- atomic.AddInt64(&r.queueLength, -1)
- switch item.itemType {
- case reporterQueueItemSpan:
- span := item.span
- if flushed, err := r.sender.Append(span); err != nil {
- r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
- } else if flushed > 0 {
- r.metrics.ReporterSuccess.Inc(int64(flushed))
- // to reduce the number of gauge stats, we only emit queue length on flush
- r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
- r.logger.Debugf("flushed %d spans", flushed)
- }
- span.Release()
- case reporterQueueItemClose:
- timer.Stop()
- flush()
- item.close.Done()
- return
- }
- }
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
deleted file mode 100644
index 2fc030547..000000000
--- a/vendor/github.com/uber/jaeger-client-go/reporter_options.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go/log"
-)
-
-// ReporterOption is a function that sets some option on the reporter.
-type ReporterOption func(c *reporterOptions)
-
-// ReporterOptions is a factory for all available ReporterOption's
-var ReporterOptions reporterOptions
-
-// reporterOptions control behavior of the reporter.
-type reporterOptions struct {
- // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
- queueSize int
- // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
- bufferFlushInterval time.Duration
- // logger is used to log errors of span submissions
- logger log.DebugLogger
- // metrics is used to record runtime stats
- metrics *Metrics
-}
-
-// QueueSize creates a ReporterOption that sets the size of the internal queue where
-// spans are stored before they are processed.
-func (reporterOptions) QueueSize(queueSize int) ReporterOption {
- return func(r *reporterOptions) {
- r.queueSize = queueSize
- }
-}
-
-// Metrics creates a ReporterOption that initializes Metrics in the reporter,
-// which is used to record runtime statistics.
-func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
- return func(r *reporterOptions) {
- r.metrics = metrics
- }
-}
-
-// BufferFlushInterval creates a ReporterOption that sets how often the queue
-// is force-flushed.
-func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
- return func(r *reporterOptions) {
- r.bufferFlushInterval = bufferFlushInterval
- }
-}
-
-// Logger creates a ReporterOption that initializes the logger used to log
-// errors of span submissions.
-func (reporterOptions) Logger(logger Logger) ReporterOption {
- return func(r *reporterOptions) {
- r.logger = log.DebugLogAdapter(logger)
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
deleted file mode 100644
index 879948e9c..000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-An Observer that can be used to emit RPC metrics
-================================================
-
-It can be attached to the tracer during tracer construction.
-See `ExampleObserver` function in [observer_test.go](./observer_test.go).
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
deleted file mode 100644
index 30555243d..000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import "sync"
-
-// normalizedEndpoints is a cache for endpointName -> safeName mappings.
-type normalizedEndpoints struct {
- names map[string]string
- maxSize int
- defaultName string
- normalizer NameNormalizer
- mux sync.RWMutex
-}
-
-func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
- return &normalizedEndpoints{
- maxSize: maxSize,
- normalizer: normalizer,
- names: make(map[string]string, maxSize),
- }
-}
-
-// normalize looks up the name in the cache, if not found it uses normalizer
-// to convert the name to a safe name. If called with more than maxSize unique
-// names it returns "" for all other names beyond those already cached.
-func (n *normalizedEndpoints) normalize(name string) string {
- n.mux.RLock()
- norm, ok := n.names[name]
- l := len(n.names)
- n.mux.RUnlock()
- if ok {
- return norm
- }
- if l >= n.maxSize {
- return ""
- }
- return n.normalizeWithLock(name)
-}
-
-func (n *normalizedEndpoints) normalizeWithLock(name string) string {
- norm := n.normalizer.Normalize(name)
- n.mux.Lock()
- defer n.mux.Unlock()
- // cache may have grown while we were not holding the lock
- if len(n.names) >= n.maxSize {
- return ""
- }
- n.names[name] = norm
- return norm
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
deleted file mode 100644
index a8cec2fa6..000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import (
- "sync"
-
- "github.com/uber/jaeger-lib/metrics"
-)
-
-const (
- otherEndpointsPlaceholder = "other"
- endpointNameMetricTag = "endpoint"
-)
-
-// Metrics is a collection of metrics for an endpoint describing
-// throughput, success, errors, and performance.
-type Metrics struct {
- // RequestCountSuccess is a counter of the total number of successes.
- RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
-
- // RequestCountFailures is a counter of the number of times any failure has been observed.
- RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
-
- // RequestLatencySuccess is a latency histogram of successful requests.
- RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
-
- // RequestLatencyFailures is a latency histogram of failed requests.
- RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
-
- // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
- HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
-
- // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
- HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
-
- // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
- HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
-
- // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
- HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
-}
-
-func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
- if statusCode >= 200 && statusCode < 300 {
- m.HTTPStatusCode2xx.Inc(1)
- } else if statusCode >= 300 && statusCode < 400 {
- m.HTTPStatusCode3xx.Inc(1)
- } else if statusCode >= 400 && statusCode < 500 {
- m.HTTPStatusCode4xx.Inc(1)
- } else if statusCode >= 500 && statusCode < 600 {
- m.HTTPStatusCode5xx.Inc(1)
- }
-}
-
-// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
-// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
-// to a generic endpoint name "other".
-type MetricsByEndpoint struct {
- metricsFactory metrics.Factory
- endpoints *normalizedEndpoints
- metricsByEndpoint map[string]*Metrics
- mux sync.RWMutex
-}
-
-func newMetricsByEndpoint(
- metricsFactory metrics.Factory,
- normalizer NameNormalizer,
- maxNumberOfEndpoints int,
-) *MetricsByEndpoint {
- return &MetricsByEndpoint{
- metricsFactory: metricsFactory,
- endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
- metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
- }
-}
-
-func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
- safeName := m.endpoints.normalize(endpoint)
- if safeName == "" {
- safeName = otherEndpointsPlaceholder
- }
- m.mux.RLock()
- met := m.metricsByEndpoint[safeName]
- m.mux.RUnlock()
- if met != nil {
- return met
- }
-
- return m.getWithWriteLock(safeName)
-}
-
-// split to make easier to test
-func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
- m.mux.Lock()
- defer m.mux.Unlock()
-
- // it is possible that the name has been already registered after we released
- // the read lock and before we grabbed the write lock, so check for that.
- if met, ok := m.metricsByEndpoint[safeName]; ok {
- return met
- }
-
- // it would be nice to create the struct before locking, since Init() is somewhat
- // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
- met := &Metrics{}
- tags := map[string]string{endpointNameMetricTag: safeName}
- metrics.Init(met, m.metricsFactory, tags)
-
- m.metricsByEndpoint[safeName] = met
- return met
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
deleted file mode 100644
index 148d84b3a..000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-// NameNormalizer is used to convert the endpoint names to strings
-// that can be safely used as tags in the metrics.
-type NameNormalizer interface {
- Normalize(name string) string
-}
-
-// DefaultNameNormalizer converts endpoint names so that they contain only characters
-// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
-var DefaultNameNormalizer = &SimpleNameNormalizer{
- SafeSets: []SafeCharacterSet{
- &Range{From: 'a', To: 'z'},
- &Range{From: 'A', To: 'Z'},
- &Range{From: '0', To: '9'},
- &Char{'-'},
- &Char{'_'},
- &Char{'/'},
- &Char{'.'},
- },
- Replacement: '-',
-}
-
-// SimpleNameNormalizer uses a set of safe character sets.
-type SimpleNameNormalizer struct {
- SafeSets []SafeCharacterSet
- Replacement byte
-}
-
-// SafeCharacterSet determines if the given character is "safe"
-type SafeCharacterSet interface {
- IsSafe(c byte) bool
-}
-
-// Range implements SafeCharacterSet
-type Range struct {
- From, To byte
-}
-
-// IsSafe implements SafeCharacterSet
-func (r *Range) IsSafe(c byte) bool {
- return c >= r.From && c <= r.To
-}
-
-// Char implements SafeCharacterSet
-type Char struct {
- Val byte
-}
-
-// IsSafe implements SafeCharacterSet
-func (ch *Char) IsSafe(c byte) bool {
- return c == ch.Val
-}
-
-// Normalize checks each character in the string against SafeSets,
-// and if it's not safe substitutes it with Replacement.
-func (n *SimpleNameNormalizer) Normalize(name string) string {
- var retMe []byte
- nameBytes := []byte(name)
- for i, b := range nameBytes {
- if n.safeByte(b) {
- if retMe != nil {
- retMe[i] = b
- }
- } else {
- if retMe == nil {
- retMe = make([]byte, len(nameBytes))
- copy(retMe[0:i], nameBytes[0:i])
- }
- retMe[i] = n.Replacement
- }
- }
- if retMe == nil {
- return name
- }
- return string(retMe)
-}
-
-// safeByte checks b against all safe charsets.
-func (n *SimpleNameNormalizer) safeByte(b byte) bool {
- for i := range n.SafeSets {
- if n.SafeSets[i].IsSafe(b) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
deleted file mode 100644
index eca5ff6f3..000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import (
- "strconv"
- "sync"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/uber/jaeger-lib/metrics"
-
- jaeger "github.com/uber/jaeger-client-go"
-)
-
-const defaultMaxNumberOfEndpoints = 200
-
-// Observer is an observer that can emit RPC metrics.
-type Observer struct {
- metricsByEndpoint *MetricsByEndpoint
-}
-
-// NewObserver creates a new observer that can emit RPC metrics.
-func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
- return &Observer{
- metricsByEndpoint: newMetricsByEndpoint(
- metricsFactory,
- normalizer,
- defaultMaxNumberOfEndpoints,
- ),
- }
-}
-
-// OnStartSpan creates a new SpanObserver for the span.
-func (o *Observer) OnStartSpan(
- operationName string,
- options opentracing.StartSpanOptions,
-) jaeger.SpanObserver {
- return NewSpanObserver(o.metricsByEndpoint, operationName, options)
-}
-
-// SpanKind identifies the span as inbound, outbound, or internal
-type SpanKind int
-
-const (
- // Local span kind
- Local SpanKind = iota
- // Inbound span kind
- Inbound
- // Outbound span kind
- Outbound
-)
-
-// SpanObserver collects RPC metrics
-type SpanObserver struct {
- metricsByEndpoint *MetricsByEndpoint
- operationName string
- startTime time.Time
- mux sync.Mutex
- kind SpanKind
- httpStatusCode uint16
- err bool
-}
-
-// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
-func NewSpanObserver(
- metricsByEndpoint *MetricsByEndpoint,
- operationName string,
- options opentracing.StartSpanOptions,
-) *SpanObserver {
- so := &SpanObserver{
- metricsByEndpoint: metricsByEndpoint,
- operationName: operationName,
- startTime: options.StartTime,
- }
- for k, v := range options.Tags {
- so.handleTagInLock(k, v)
- }
- return so
-}
-
-// handleTagInLock watches for special tags:
-// - SpanKind
-// - HttpStatusCode
-// - Error
-func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
- if key == string(ext.SpanKind) {
- if v, ok := value.(ext.SpanKindEnum); ok {
- value = string(v)
- }
- if v, ok := value.(string); ok {
- if v == string(ext.SpanKindRPCClientEnum) {
- so.kind = Outbound
- } else if v == string(ext.SpanKindRPCServerEnum) {
- so.kind = Inbound
- }
- }
- return
- }
- if key == string(ext.HTTPStatusCode) {
- if v, ok := value.(uint16); ok {
- so.httpStatusCode = v
- } else if v, ok := value.(int); ok {
- so.httpStatusCode = uint16(v)
- } else if v, ok := value.(string); ok {
- if vv, err := strconv.Atoi(v); err == nil {
- so.httpStatusCode = uint16(vv)
- }
- }
- return
- }
- if key == string(ext.Error) {
- if v, ok := value.(bool); ok {
- so.err = v
- } else if v, ok := value.(string); ok {
- if vv, err := strconv.ParseBool(v); err == nil {
- so.err = vv
- }
- }
- return
- }
-}
-
-// OnFinish emits the RPC metrics. It only has an effect when operation name
-// is not blank, and the span kind is an RPC server.
-func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
- so.mux.Lock()
- defer so.mux.Unlock()
-
- if so.operationName == "" || so.kind != Inbound {
- return
- }
-
- mets := so.metricsByEndpoint.get(so.operationName)
- latency := options.FinishTime.Sub(so.startTime)
- if so.err {
- mets.RequestCountFailures.Inc(1)
- mets.RequestLatencyFailures.Record(latency)
- } else {
- mets.RequestCountSuccess.Inc(1)
- mets.RequestLatencySuccess.Record(latency)
- }
- mets.recordHTTPStatusCode(so.httpStatusCode)
-}
-
-// OnSetOperationName records new operation name.
-func (so *SpanObserver) OnSetOperationName(operationName string) {
- so.mux.Lock()
- so.operationName = operationName
- so.mux.Unlock()
-}
-
-// OnSetTag implements SpanObserver
-func (so *SpanObserver) OnSetTag(key string, value interface{}) {
- so.mux.Lock()
- so.handleTagInLock(key, value)
- so.mux.Unlock()
-}
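
The tag handling above accepts the HTTP status code as uint16, int, or string; a minimal standalone sketch of that coercion (the function name is illustrative):

package main

import (
	"fmt"
	"strconv"
)

// statusCode mirrors the coercion done in handleTagInLock: a status tag may
// arrive as uint16, int, or a numeric string, and anything else yields 0.
func statusCode(value interface{}) uint16 {
	switch v := value.(type) {
	case uint16:
		return v
	case int:
		return uint16(v)
	case string:
		if n, err := strconv.Atoi(v); err == nil {
			return uint16(n)
		}
	}
	return 0
}

func main() {
	fmt.Println(statusCode(uint16(200)), statusCode(404), statusCode("503")) // 200 404 503
}
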
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
deleted file mode 100644
index d0be8ad50..000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler.go
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "math"
- "strings"
- "sync"
-
- "github.com/uber/jaeger-client-go/thrift-gen/sampling"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
- defaultMaxOperations = 2000
-)
-
-// Sampler decides whether a new trace should be sampled or not.
-type Sampler interface {
- // IsSampled decides whether a trace with given `id` and `operation`
- // should be sampled. This function will also return the tags that
- // can be used to identify the type of sampling that was applied to
- // the root span. Most simple samplers would return two tags,
- // sampler.type and sampler.param, similar to those used in the Configuration
- IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
-
- // Close does a clean shutdown of the sampler, stopping any background
- // go-routines it may have started.
- Close()
-
- // Equal checks if the `other` sampler is functionally equivalent
- // to this sampler.
- // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
- Equal(other Sampler) bool
-}
-
-// -----------------------
-
-// ConstSampler is a sampler that always makes the same decision.
-type ConstSampler struct {
- legacySamplerV1Base
- Decision bool
- tags []Tag
-}
-
-// NewConstSampler creates a ConstSampler.
-func NewConstSampler(sample bool) *ConstSampler {
- tags := []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeConst},
- {key: SamplerParamTagKey, value: sample},
- }
- s := &ConstSampler{
- Decision: sample,
- tags: tags,
- }
- s.delegate = s.IsSampled
- return s
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.Decision, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *ConstSampler) Close() {
- // nothing to do
-}
-
-// Equal implements Equal() of Sampler.
-func (s *ConstSampler) Equal(other Sampler) bool {
- if o, ok := other.(*ConstSampler); ok {
- return s.Decision == o.Decision
- }
- return false
-}
-
-// String is used to log sampler details.
-func (s *ConstSampler) String() string {
- return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
-}
-
-// -----------------------
-
-// ProbabilisticSampler is a sampler that randomly samples a certain percentage
-// of traces.
-type ProbabilisticSampler struct {
- legacySamplerV1Base
- samplingRate float64
- samplingBoundary uint64
- tags []Tag
-}
-
-const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
-
-// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
-// samplingRate, in the range between 0.0 and 1.0.
-//
-// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision
-// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
-// TODO remove the error from this function for next major release
-func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
- if samplingRate < 0.0 || samplingRate > 1.0 {
- return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
- }
- return newProbabilisticSampler(samplingRate), nil
-}
-
-func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
- s := new(ProbabilisticSampler)
- s.delegate = s.IsSampled
- return s.init(samplingRate)
-}
-
-func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
- s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
- s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
- {key: SamplerParamTagKey, value: s.samplingRate},
- }
- return s
-}
-
-// SamplingRate returns the sampling probability this sampler was constructed with.
-func (s *ProbabilisticSampler) SamplingRate() float64 {
- return s.samplingRate
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *ProbabilisticSampler) Close() {
- // nothing to do
-}
-
-// Equal implements Equal() of Sampler.
-func (s *ProbabilisticSampler) Equal(other Sampler) bool {
- if o, ok := other.(*ProbabilisticSampler); ok {
- return s.samplingBoundary == o.samplingBoundary
- }
- return false
-}
-
-// Update modifies in-place the sampling rate. Locking must be done externally.
-func (s *ProbabilisticSampler) Update(samplingRate float64) error {
- if samplingRate < 0.0 || samplingRate > 1.0 {
- return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
- }
- s.init(samplingRate)
- return nil
-}
-
-// String is used to log sampler details.
-func (s *ProbabilisticSampler) String() string {
- return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
-}
-
-// -----------------------
-
-// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
-// burstiness of the service, i.e. a service with uniformly distributed requests will have those
-// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
-// number of sequential requests can be sampled each second.
-type RateLimitingSampler struct {
- legacySamplerV1Base
- maxTracesPerSecond float64
- rateLimiter *utils.ReconfigurableRateLimiter
- tags []Tag
-}
-
-// NewRateLimitingSampler creates new RateLimitingSampler.
-func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
- s := new(RateLimitingSampler)
- s.delegate = s.IsSampled
- return s.init(maxTracesPerSecond)
-}
-
-func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
- if s.rateLimiter == nil {
- s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
- } else {
- s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
- }
- s.maxTracesPerSecond = maxTracesPerSecond
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
- {key: SamplerParamTagKey, value: maxTracesPerSecond},
- }
- return s
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.rateLimiter.CheckCredit(1.0), s.tags
-}
-
-// Update reconfigures the rate limiter, while preserving its accumulated balance.
-// Locking must be done externally.
-func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
- if s.maxTracesPerSecond != maxTracesPerSecond {
- s.init(maxTracesPerSecond)
- }
-}
-
-// Close does nothing.
-func (s *RateLimitingSampler) Close() {
- // nothing to do
-}
-
-// Equal compares with another sampler.
-func (s *RateLimitingSampler) Equal(other Sampler) bool {
- if o, ok := other.(*RateLimitingSampler); ok {
- return s.maxTracesPerSecond == o.maxTracesPerSecond
- }
- return false
-}
-
-// String is used to log sampler details.
-func (s *RateLimitingSampler) String() string {
- return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
-}
-
-// -----------------------
-
-// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
-// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
-// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound
-// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
-//
-// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
-// samplers return true, the tags for ProbabilisticSampler will be used.
-type GuaranteedThroughputProbabilisticSampler struct {
- probabilisticSampler *ProbabilisticSampler
- lowerBoundSampler *RateLimitingSampler
- tags []Tag
- samplingRate float64
- lowerBound float64
-}
-
-// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
-// ProbabilisticSampler and RateLimitingSampler.
-func NewGuaranteedThroughputProbabilisticSampler(
- lowerBound, samplingRate float64,
-) (*GuaranteedThroughputProbabilisticSampler, error) {
- return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
-}
-
-func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
- s := &GuaranteedThroughputProbabilisticSampler{
- lowerBoundSampler: NewRateLimitingSampler(lowerBound),
- lowerBound: lowerBound,
- }
- s.setProbabilisticSampler(samplingRate)
- return s
-}
-
-func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
- if s.probabilisticSampler == nil {
- s.probabilisticSampler = newProbabilisticSampler(samplingRate)
- } else if s.samplingRate != samplingRate {
- s.probabilisticSampler.init(samplingRate)
- }
- // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
- samplingRate = s.probabilisticSampler.SamplingRate()
- if s.samplingRate != samplingRate || s.tags == nil {
- s.samplingRate = s.probabilisticSampler.SamplingRate()
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
- {key: SamplerParamTagKey, value: s.samplingRate},
- }
- }
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
- s.lowerBoundSampler.IsSampled(id, operation)
- return true, tags
- }
- sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
- return sampled, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) Close() {
- s.probabilisticSampler.Close()
- s.lowerBoundSampler.Close()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
- // more information.
- return false
-}
-
-// this function should only be called while holding a Write lock
-func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
- s.setProbabilisticSampler(samplingRate)
- if s.lowerBound != lowerBound {
- s.lowerBoundSampler.Update(lowerBound)
- s.lowerBound = lowerBound
- }
-}
-
-func (s GuaranteedThroughputProbabilisticSampler) String() string {
- return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
-}
-
-// -----------------------
-
-// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
-// on a per-operation basis.
-type PerOperationSampler struct {
- sync.RWMutex
-
- samplers map[string]*GuaranteedThroughputProbabilisticSampler
- defaultSampler *ProbabilisticSampler
- lowerBound float64
- maxOperations int
-
- // see description in PerOperationSamplerParams
- operationNameLateBinding bool
-}
-
-// NewAdaptiveSampler returns a new PerOperationSampler.
-// Deprecated: please use NewPerOperationSampler.
-func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
- return NewPerOperationSampler(PerOperationSamplerParams{
- MaxOperations: maxOperations,
- Strategies: strategies,
- }), nil
-}
-
-// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
-type PerOperationSamplerParams struct {
- // Max number of operations that will be tracked. Other operations will be given default strategy.
- MaxOperations int
-
- // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
- // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
- // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
- // in applications that always provide the correct span name on trace creation.
- //
- // For backwards compatibility this option is off by default.
- OperationNameLateBinding bool
-
- // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
- Strategies *sampling.PerOperationSamplingStrategies
-}
-
-// NewPerOperationSampler returns a new PerOperationSampler.
-func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
- if params.MaxOperations <= 0 {
- params.MaxOperations = defaultMaxOperations
- }
- samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
- for _, strategy := range params.Strategies.PerOperationStrategies {
- sampler := newGuaranteedThroughputProbabilisticSampler(
- params.Strategies.DefaultLowerBoundTracesPerSecond,
- strategy.ProbabilisticSampling.SamplingRate,
- )
- samplers[strategy.Operation] = sampler
- }
- return &PerOperationSampler{
- samplers: samplers,
- defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
- lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
- maxOperations: params.MaxOperations,
- operationNameLateBinding: params.OperationNameLateBinding,
- }
-}
-
-// IsSampled is not used and only exists to match Sampler V1 API.
-// TODO (breaking change) remove when upgrading everything to SamplerV2
-func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return false, nil
-}
-
-func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
- samplerV1 := s.getSamplerForOperation(operationName)
- var sampled bool
- var tags []Tag
- if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
- sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
- }
- return sampled, tags
-}
-
-// OnCreateSpan implements OnCreateSpan of SamplerV2.
-func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
- sampled, tags := s.trySampling(span, span.OperationName())
- return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
-}
-
-// OnSetOperationName implements OnSetOperationName of SamplerV2.
-func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- sampled, tags := s.trySampling(span, operationName)
- return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
-}
-
-// OnSetTag implements OnSetTag of SamplerV2.
-func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-// OnFinishSpan implements OnFinishSpan of SamplerV2.
-func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
- s.RLock()
- sampler, ok := s.samplers[operation]
- if ok {
- defer s.RUnlock()
- return sampler
- }
- s.RUnlock()
- s.Lock()
- defer s.Unlock()
-
- // Check if sampler has already been created
- sampler, ok = s.samplers[operation]
- if ok {
- return sampler
- }
- // Store only up to maxOperations of unique ops.
- if len(s.samplers) >= s.maxOperations {
- return s.defaultSampler
- }
- newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
- s.samplers[operation] = newSampler
- return newSampler
-}
-
-// Close invokes Close on all underlying samplers.
-func (s *PerOperationSampler) Close() {
- s.Lock()
- defer s.Unlock()
- for _, sampler := range s.samplers {
- sampler.Close()
- }
- s.defaultSampler.Close()
-}
-
-func (s *PerOperationSampler) String() string {
- var sb strings.Builder
-
- fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
- fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
- fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
- fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
- fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
- fmt.Fprintf(&sb, "samplers=[")
- for operationName, sampler := range s.samplers {
- fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
- }
- fmt.Fprintf(&sb, "])")
-
- return sb.String()
-}
-
-// Equal is not used.
-// TODO (breaking change) remove this in the future
-func (s *PerOperationSampler) Equal(other Sampler) bool {
- // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
- // samplers which all need to be initialized before this function can be called for a comparison.
- // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
- // changing. Hence this function always returns false so that the update function can be called.
- // Once the Equal() function is removed from the Sampler API, this will no longer be needed.
- return false
-}
-
-func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
- s.Lock()
- defer s.Unlock()
- newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
- for _, strategy := range strategies.PerOperationStrategies {
- operation := strategy.Operation
- samplingRate := strategy.ProbabilisticSampling.SamplingRate
- lowerBound := strategies.DefaultLowerBoundTracesPerSecond
- if sampler, ok := s.samplers[operation]; ok {
- sampler.update(lowerBound, samplingRate)
- newSamplers[operation] = sampler
- } else {
- sampler := newGuaranteedThroughputProbabilisticSampler(
- lowerBound,
- samplingRate,
- )
- newSamplers[operation] = sampler
- }
- }
- s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
- if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
- s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
- }
- s.samplers = newSamplers
-}
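
The probabilistic sampler above turns a sampling rate into a fixed 63-bit boundary and compares trace IDs against it; a self-contained sketch of that idea, with an illustrative simulation in main:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

const maxRandom = ^(uint64(1) << 63) // 0x7fffffffffffffff, as in maxRandomNumber above

// shouldSample reproduces the core of ProbabilisticSampler: trace IDs are
// effectively 63-bit random numbers, so comparing the ID against
// rate * 2^63 samples roughly the requested fraction of traces without
// drawing an extra random number per decision.
func shouldSample(traceIDLow uint64, rate float64) bool {
	rate = math.Max(0, math.Min(rate, 1))
	boundary := uint64(float64(maxRandom) * rate)
	return traceIDLow&maxRandom <= boundary
}

func main() {
	const n = 1000000
	sampled := 0
	for i := 0; i < n; i++ {
		if shouldSample(rand.Uint64(), 0.01) {
			sampled++
		}
	}
	fmt.Printf("sampled %.2f%% of %d simulated traces\n", 100*float64(sampled)/n, n)
}
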
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
deleted file mode 100644
index f2edd5ca9..000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/thrift-gen/sampling"
-)
-
-const (
- defaultSamplingRefreshInterval = time.Minute
-)
-
-// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
-type SamplingStrategyFetcher interface {
- Fetch(service string) ([]byte, error)
-}
-
-// SamplingStrategyParser is used to parse sampling strategy updates. The output object
-// should be of the type that is recognized by the SamplerUpdaters.
-type SamplingStrategyParser interface {
- Parse(response []byte) (interface{}, error)
-}
-
-// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
-// retrieved from remote config server, to the current sampler. The updater can modify
-// the sampler in-place if sampler supports it, or create a new one.
-//
-// If the strategy does not contain configuration for the sampler in question,
-// updater must return modifiedSampler=nil to give other updaters a chance to inspect
-// the sampling strategy response.
-//
-// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
-type SamplerUpdater interface {
- Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
-}
-
-// RemotelyControlledSampler is a delegating sampler that polls a remote server
-// for the appropriate sampling strategy, constructs a corresponding sampler and
-// delegates to it for sampling decisions.
-type RemotelyControlledSampler struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- closed int64 // 0 - not closed, 1 - closed
-
- sync.RWMutex // used to serialize access to samplerOptions.sampler
- samplerOptions
-
- serviceName string
- doneChan chan *sync.WaitGroup
-}
-
-// NewRemotelyControlledSampler creates a sampler that periodically pulls
-// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
-func NewRemotelyControlledSampler(
- serviceName string,
- opts ...SamplerOption,
-) *RemotelyControlledSampler {
- options := new(samplerOptions).applyOptionsAndDefaults(opts...)
- sampler := &RemotelyControlledSampler{
- samplerOptions: *options,
- serviceName: serviceName,
- doneChan: make(chan *sync.WaitGroup),
- }
- go sampler.pollController()
- return sampler
-}
-
-// IsSampled implements IsSampled() of Sampler.
-// TODO (breaking change) remove when Sampler V1 is removed
-func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return false, nil
-}
-
-// OnCreateSpan implements OnCreateSpan of SamplerV2.
-func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
- return s.Sampler().OnCreateSpan(span)
-}
-
-// OnSetOperationName implements OnSetOperationName of SamplerV2.
-func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- return s.Sampler().OnSetOperationName(span, operationName)
-}
-
-// OnSetTag implements OnSetTag of SamplerV2.
-func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return s.Sampler().OnSetTag(span, key, value)
-}
-
-// OnFinishSpan implements OnFinishSpan of SamplerV2.
-func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
- return s.Sampler().OnFinishSpan(span)
-}
-
-// Close implements Close() of Sampler.
-func (s *RemotelyControlledSampler) Close() {
- if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
- s.logger.Error("Repeated attempt to close the sampler is ignored")
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(1)
- s.doneChan <- &wg
- wg.Wait()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
- // more information.
- return false
-}
-
-func (s *RemotelyControlledSampler) pollController() {
- ticker := time.NewTicker(s.samplingRefreshInterval)
- defer ticker.Stop()
- s.pollControllerWithTicker(ticker)
-}
-
-func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
- for {
- select {
- case <-ticker.C:
- s.UpdateSampler()
- case wg := <-s.doneChan:
- wg.Done()
- return
- }
- }
-}
-
-// Sampler returns the currently active sampler.
-func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
- s.RLock()
- defer s.RUnlock()
- return s.sampler
-}
-
-func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
- s.Lock()
- defer s.Unlock()
- s.sampler = sampler
-}
-
-// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
-// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
-func (s *RemotelyControlledSampler) UpdateSampler() {
- res, err := s.samplingFetcher.Fetch(s.serviceName)
- if err != nil {
- s.metrics.SamplerQueryFailure.Inc(1)
- s.logger.Infof("failed to fetch sampling strategy: %v", err)
- return
- }
- strategy, err := s.samplingParser.Parse(res)
- if err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("failed to parse sampling strategy response: %v", err)
- return
- }
-
- s.Lock()
- defer s.Unlock()
-
- s.metrics.SamplerRetrieved.Inc(1)
- if err := s.updateSamplerViaUpdaters(strategy); err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
- return
- }
- s.metrics.SamplerUpdated.Inc(1)
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
- for _, updater := range s.updaters {
- sampler, err := updater.Update(s.sampler, strategy)
- if err != nil {
- return err
- }
- if sampler != nil {
- s.logger.Debugf("sampler updated: %+v", sampler)
- s.sampler = sampler
- return nil
- }
- }
- return fmt.Errorf("unsupported sampling strategy %+v", strategy)
-}
-
-// -----------------------
-
-// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-type ProbabilisticSamplerUpdater struct{}
-
-// Update implements Update of SamplerUpdater.
-func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if resp, ok := strategy.(response); ok {
- if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
- if ps, ok := sampler.(*ProbabilisticSampler); ok {
- if err := ps.Update(probabilistic.SamplingRate); err != nil {
- return nil, err
- }
- return sampler, nil
- }
- return newProbabilisticSampler(probabilistic.SamplingRate), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-type RateLimitingSamplerUpdater struct{}
-
-// Update implements Update of SamplerUpdater.
-func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if resp, ok := strategy.(response); ok {
- if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
- rateLimit := float64(rateLimiting.MaxTracesPerSecond)
- if rl, ok := sampler.(*RateLimitingSampler); ok {
- rl.Update(rateLimit)
- return rl, nil
- }
- return NewRateLimitingSampler(rateLimit), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-// Fields have the same meaning as in PerOperationSamplerParams.
-type AdaptiveSamplerUpdater struct {
- MaxOperations int
- OperationNameLateBinding bool
-}
-
-// Update implements Update of SamplerUpdater.
-func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetOperationSampling() *sampling.PerOperationSamplingStrategies
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if p, ok := strategy.(response); ok {
- if operations := p.GetOperationSampling(); operations != nil {
- if as, ok := sampler.(*PerOperationSampler); ok {
- as.update(operations)
- return as, nil
- }
- return NewPerOperationSampler(PerOperationSamplerParams{
- MaxOperations: u.MaxOperations,
- OperationNameLateBinding: u.OperationNameLateBinding,
- Strategies: operations,
- }), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-type httpSamplingStrategyFetcher struct {
- serverURL string
- logger log.DebugLogger
-}
-
-func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
- v := url.Values{}
- v.Set("service", serviceName)
- uri := f.serverURL + "?" + v.Encode()
-
- // TODO create and reuse http.Client with proper timeout settings, etc.
- resp, err := http.Get(uri)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- if err := resp.Body.Close(); err != nil {
- f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
- }
- }()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- if resp.StatusCode >= 400 {
- return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
- }
-
- return body, nil
-}
-
-// -----------------------
-
-type samplingStrategyParser struct{}
-
-func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
- strategy := new(sampling.SamplingStrategyResponse)
- if err := json.Unmarshal(response, strategy); err != nil {
- return nil, err
- }
- return strategy, nil
-}
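
The fetcher above is a thin HTTP GET against the agent's sampling endpoint; a standalone sketch of the same request/response handling (the URL used in main is the conventional agent default, assumed here for illustration and possibly different in a given deployment):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// fetchStrategy queries <serverURL>?service=<name> and treats any HTTP
// status >= 400 as an error, mirroring httpSamplingStrategyFetcher.Fetch.
func fetchStrategy(serverURL, service string) ([]byte, error) {
	q := url.Values{}
	q.Set("service", service)
	resp, err := http.Get(serverURL + "?" + q.Encode())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
	}
	return body, nil
}

func main() {
	body, err := fetchStrategy("http://127.0.0.1:5778/sampling", "my-service")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("%s\n", body)
}
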
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
deleted file mode 100644
index e4a6108b7..000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go/log"
-)
-
-// SamplerOption is a function that sets some option on the sampler
-type SamplerOption func(options *samplerOptions)
-
-// SamplerOptions is a factory for all available SamplerOption functions.
-var SamplerOptions SamplerOptionsFactory
-
-// SamplerOptionsFactory is a factory for all available SamplerOption functions.
-// The type acts as a namespace for factory functions. It is public to
-// make the functions discoverable via godoc. It is recommended to use it
-// via the global SamplerOptions variable.
-type SamplerOptionsFactory struct{}
-
-type samplerOptions struct {
- metrics *Metrics
- sampler SamplerV2
- logger log.DebugLogger
- samplingServerURL string
- samplingRefreshInterval time.Duration
- samplingFetcher SamplingStrategyFetcher
- samplingParser SamplingStrategyParser
- updaters []SamplerUpdater
- posParams PerOperationSamplerParams
-}
-
-// Metrics creates a SamplerOption that initializes Metrics on the sampler,
-// which is used to emit statistics.
-func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
- return func(o *samplerOptions) {
- o.metrics = m
- }
-}
-
-// MaxOperations creates a SamplerOption that sets the maximum number of
-// operations the sampler will keep track of.
-func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
- return func(o *samplerOptions) {
- o.posParams.MaxOperations = maxOperations
- }
-}
-
-// OperationNameLateBinding creates a SamplerOption that sets the respective
-// field in the PerOperationSamplerParams.
-func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
- return func(o *samplerOptions) {
- o.posParams.OperationNameLateBinding = enable
- }
-}
-
-// InitialSampler creates a SamplerOption that sets the initial sampler
-// to use before a remote sampler is created and used.
-func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
- return func(o *samplerOptions) {
- o.sampler = samplerV1toV2(sampler)
- }
-}
-
-// Logger creates a SamplerOption that sets the logger used by the sampler.
-func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
- return func(o *samplerOptions) {
- o.logger = log.DebugLogAdapter(logger)
- }
-}
-
-// SamplingServerURL creates a SamplerOption that sets the sampling server url
-// of the local agent that contains the sampling strategies.
-func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingServerURL = samplingServerURL
- }
-}
-
-// SamplingRefreshInterval creates a SamplerOption that sets how often the
-// sampler will poll local agent for the appropriate sampling strategy.
-func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingRefreshInterval = samplingRefreshInterval
- }
-}
-
-// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
-func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingFetcher = fetcher
- }
-}
-
-// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
-func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingParser = parser
- }
-}
-
-// Updaters creates a SamplerOption that initializes sampler updaters.
-func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
- return func(o *samplerOptions) {
- o.updaters = updaters
- }
-}
-
-func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
- for _, option := range opts {
- option(o)
- }
- if o.sampler == nil {
- o.sampler = newProbabilisticSampler(0.001)
- }
- if o.logger == nil {
- o.logger = log.NullLogger
- }
- if o.samplingServerURL == "" {
- o.samplingServerURL = DefaultSamplingServerURL
- }
- if o.metrics == nil {
- o.metrics = NewNullMetrics()
- }
- if o.samplingRefreshInterval <= 0 {
- o.samplingRefreshInterval = defaultSamplingRefreshInterval
- }
- if o.samplingFetcher == nil {
- o.samplingFetcher = &httpSamplingStrategyFetcher{
- serverURL: o.samplingServerURL,
- logger: o.logger,
- }
- }
- if o.samplingParser == nil {
- o.samplingParser = new(samplingStrategyParser)
- }
- if o.updaters == nil {
- o.updaters = []SamplerUpdater{
- &AdaptiveSamplerUpdater{
- MaxOperations: o.posParams.MaxOperations,
- OperationNameLateBinding: o.posParams.OperationNameLateBinding,
- },
- new(ProbabilisticSamplerUpdater),
- new(RateLimitingSamplerUpdater),
- }
- }
- return o
-}
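
applyOptionsAndDefaults above is a standard functional-options constructor: user-supplied closures run first, then defaults fill whatever is still zero. A generic sketch of the pattern with illustrative field names and defaults:

package main

import (
	"fmt"
	"time"
)

type config struct {
	serverURL       string
	refreshInterval time.Duration
}

// option mutates a config, just as SamplerOption mutates samplerOptions.
type option func(*config)

func withServerURL(u string) option { return func(c *config) { c.serverURL = u } }

func withRefreshInterval(d time.Duration) option {
	return func(c *config) { c.refreshInterval = d }
}

// newConfig applies the options, then fills in defaults for anything unset,
// mirroring applyOptionsAndDefaults.
func newConfig(opts ...option) *config {
	c := &config{}
	for _, o := range opts {
		o(c)
	}
	if c.serverURL == "" {
		c.serverURL = "http://127.0.0.1:5778/sampling" // illustrative default
	}
	if c.refreshInterval <= 0 {
		c.refreshInterval = time.Minute
	}
	return c
}

func main() {
	c := newConfig(withRefreshInterval(10 * time.Second))
	fmt.Println(c.serverURL, c.refreshInterval)
}
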
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
deleted file mode 100644
index a50671a23..000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// SamplingDecision is returned by the V2 samplers.
-type SamplingDecision struct {
- Sample bool
- Retryable bool
- Tags []Tag
-}
-
-// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
-// to be made at different points of the span lifecycle.
-type SamplerV2 interface {
- OnCreateSpan(span *Span) SamplingDecision
- OnSetOperationName(span *Span, operationName string) SamplingDecision
- OnSetTag(span *Span, key string, value interface{}) SamplingDecision
- OnFinishSpan(span *Span) SamplingDecision
-
- // Close does a clean shutdown of the sampler, stopping any background
- // go-routines it may have started.
- Close()
-}
-
-// samplerV1toV2 wraps a legacy V1 sampler into an adapter that makes it look like V2.
-func samplerV1toV2(s Sampler) SamplerV2 {
- if s2, ok := s.(SamplerV2); ok {
- return s2
- }
- type legacySamplerV1toV2Adapter struct {
- legacySamplerV1Base
- }
- return &legacySamplerV1toV2Adapter{
- legacySamplerV1Base: legacySamplerV1Base{
- delegate: s.IsSampled,
- },
- }
-}
-
-// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
-// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
-// for backwards compatibility reasons.
-// TODO (breaking change) remove this in the next major release
-type SamplerV2Base struct{}
-
-// IsSampled implements IsSampled of Sampler.
-func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
- return false, nil
-}
-
-// Close implements Close of Sampler.
-func (SamplerV2Base) Close() {}
-
-// Equal implements Equal of Sampler.
-func (SamplerV2Base) Equal(other Sampler) bool { return false }
-
-// legacySamplerV1Base is used as a base for simple samplers that only implement
-// the legacy isSampled() function that is not sensitive to its arguments.
-type legacySamplerV1Base struct {
- delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
-}
-
-func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
- isSampled, tags := s.delegate(span.context.traceID, span.operationName)
- return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
-}
-
-func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- isSampled, tags := s.delegate(span.context.traceID, span.operationName)
- return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
-}
-
-func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *legacySamplerV1Base) Close() {}
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
deleted file mode 100644
index 42c9112c0..000000000
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/opentracing/opentracing-go/log"
-)
-
-// Span implements opentracing.Span
-type Span struct {
- // referenceCounter is used to increase the lifetime of
- // the object before returning it to the pool.
- referenceCounter int32
-
- sync.RWMutex
-
- tracer *Tracer
-
- // TODO: (breaking change) change to use a pointer
- context SpanContext
-
- // The name of the "operation" this span is an instance of.
- // Known as a "span name" in some implementations.
- operationName string
-
- // firstInProcess, if true, indicates that this span is the root of the (sub)tree
- // of spans in the current process. In other words it's true for the root spans,
- // and the ingress spans when the process joins another trace.
- firstInProcess bool
-
- // startTime is the timestamp indicating when the span began, with microseconds precision.
- startTime time.Time
-
- // duration returns duration of the span with microseconds precision.
- // Zero value means duration is unknown.
- duration time.Duration
-
- // tags attached to this span
- tags []Tag
-
- // The span's "micro-log"
- logs []opentracing.LogRecord
-
- // The number of logs dropped because of MaxLogsPerSpan.
- numDroppedLogs int
-
- // references for this span
- references []Reference
-
- observer ContribSpanObserver
-}
-
-// Tag is a simple key value wrapper.
-// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
-type Tag struct {
- key string
- value interface{}
-}
-
-// NewTag creates a new Tag.
-// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
-func NewTag(key string, value interface{}) Tag {
- return Tag{key: key, value: value}
-}
-
-// SetOperationName sets or changes the operation name.
-func (s *Span) SetOperationName(operationName string) opentracing.Span {
- s.Lock()
- s.operationName = operationName
- s.Unlock()
- if !s.isSamplingFinalized() {
- decision := s.tracer.sampler.OnSetOperationName(s, operationName)
- s.applySamplingDecision(decision, true)
- }
- s.observer.OnSetOperationName(operationName)
- return s
-}
-
-// SetTag implements SetTag() of opentracing.Span
-func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
- return s.setTagInternal(key, value, true)
-}
-
-func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
- s.observer.OnSetTag(key, value)
- if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
- return s
- }
- if !s.isSamplingFinalized() {
- decision := s.tracer.sampler.OnSetTag(s, key, value)
- s.applySamplingDecision(decision, lock)
- }
- if s.isWriteable() {
- if lock {
- s.Lock()
- defer s.Unlock()
- }
- s.appendTagNoLocking(key, value)
- }
- return s
-}
-
-// SpanContext returns span context
-func (s *Span) SpanContext() SpanContext {
- s.Lock()
- defer s.Unlock()
- return s.context
-}
-
-// StartTime returns span start time
-func (s *Span) StartTime() time.Time {
- s.Lock()
- defer s.Unlock()
- return s.startTime
-}
-
-// Duration returns span duration
-func (s *Span) Duration() time.Duration {
- s.Lock()
- defer s.Unlock()
- return s.duration
-}
-
-// Tags returns tags for span
-func (s *Span) Tags() opentracing.Tags {
- s.Lock()
- defer s.Unlock()
- var result = make(opentracing.Tags, len(s.tags))
- for _, tag := range s.tags {
- result[tag.key] = tag.value
- }
- return result
-}
-
-// Logs returns micro logs for span
-func (s *Span) Logs() []opentracing.LogRecord {
- s.Lock()
- defer s.Unlock()
-
- logs := append([]opentracing.LogRecord(nil), s.logs...)
- if s.numDroppedLogs != 0 {
- fixLogs(logs, s.numDroppedLogs)
- }
-
- return logs
-}
-
-// References returns references for this span
-func (s *Span) References() []opentracing.SpanReference {
- s.Lock()
- defer s.Unlock()
-
- if s.references == nil || len(s.references) == 0 {
- return nil
- }
-
- result := make([]opentracing.SpanReference, len(s.references))
- for i, r := range s.references {
- result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
- }
- return result
-}
-
-func (s *Span) appendTagNoLocking(key string, value interface{}) {
- s.tags = append(s.tags, Tag{key: key, value: value})
-}
-
-// LogFields implements opentracing.Span API
-func (s *Span) LogFields(fields ...log.Field) {
- s.Lock()
- defer s.Unlock()
- if !s.context.IsSampled() {
- return
- }
- s.logFieldsNoLocking(fields...)
-}
-
-// this function should only be called while holding a Write lock
-func (s *Span) logFieldsNoLocking(fields ...log.Field) {
- lr := opentracing.LogRecord{
- Fields: fields,
- Timestamp: time.Now(),
- }
- s.appendLogNoLocking(lr)
-}
-
-// LogKV implements opentracing.Span API
-func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
- s.RLock()
- sampled := s.context.IsSampled()
- s.RUnlock()
- if !sampled {
- return
- }
- fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
- if err != nil {
- s.LogFields(log.Error(err), log.String("function", "LogKV"))
- return
- }
- s.LogFields(fields...)
-}
-
-// LogEvent implements opentracing.Span API
-func (s *Span) LogEvent(event string) {
- s.Log(opentracing.LogData{Event: event})
-}
-
-// LogEventWithPayload implements opentracing.Span API
-func (s *Span) LogEventWithPayload(event string, payload interface{}) {
- s.Log(opentracing.LogData{Event: event, Payload: payload})
-}
-
-// Log implements opentracing.Span API
-func (s *Span) Log(ld opentracing.LogData) {
- s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- if ld.Timestamp.IsZero() {
- ld.Timestamp = s.tracer.timeNow()
- }
- s.appendLogNoLocking(ld.ToLogRecord())
- }
-}
-
-// this function should only be called while holding a Write lock
-func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
- maxLogs := s.tracer.options.maxLogsPerSpan
- if maxLogs == 0 || len(s.logs) < maxLogs {
- s.logs = append(s.logs, lr)
- return
- }
-
- // We have too many logs. We don't touch the first numOld logs; we treat the
- // rest as a circular buffer and overwrite the oldest log among those.
- numOld := (maxLogs - 1) / 2
- numNew := maxLogs - numOld
- s.logs[numOld+s.numDroppedLogs%numNew] = lr
- s.numDroppedLogs++
-}
-
-// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move to
-// the end (i.e. pos circular left shifts).
-func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
- // This algorithm is described in:
- // http://www.cplusplus.com/reference/algorithm/rotate
- for first, middle, next := 0, pos, pos; first != middle; {
- buf[first], buf[next] = buf[next], buf[first]
- first++
- next++
- if next == len(buf) {
- next = middle
- } else if first == middle {
- middle = next
- }
- }
-}
-
-func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
- // We dropped some log events, which means that we used part of Logs as a
- // circular buffer (see appendLog). De-circularize it.
- numOld := (len(logs) - 1) / 2
- numNew := len(logs) - numOld
- rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
-
- // Replace the log in the middle (the oldest "new" log) with information
- // about the dropped logs. This means that we are effectively dropping one
- // more "new" log.
- numDropped := numDroppedLogs + 1
- logs[numOld] = opentracing.LogRecord{
- // Keep the timestamp of the last dropped event.
- Timestamp: logs[numOld].Timestamp,
- Fields: []log.Field{
- log.String("event", "dropped Span logs"),
- log.Int("dropped_log_count", numDropped),
- log.String("component", "jaeger-client"),
- },
- }
-}
-
-func (s *Span) fixLogsIfDropped() {
- if s.numDroppedLogs == 0 {
- return
- }
- fixLogs(s.logs, s.numDroppedLogs)
- s.numDroppedLogs = 0
-}
-
-// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
-func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
- s.Lock()
- defer s.Unlock()
- s.tracer.setBaggage(s, key, value)
- return s
-}
-
-// BaggageItem implements BaggageItem() of opentracing.SpanContext
-func (s *Span) BaggageItem(key string) string {
- s.RLock()
- defer s.RUnlock()
- return s.context.baggage[key]
-}
-
-// Finish implements opentracing.Span API
-// After Finish() the Span object is returned to the allocator unless the reporter retains it,
-// so the Span should no longer be used afterwards because it may no longer be valid.
-func (s *Span) Finish() {
- s.FinishWithOptions(opentracing.FinishOptions{})
-}
-
-// FinishWithOptions implements opentracing.Span API
-func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
- if options.FinishTime.IsZero() {
- options.FinishTime = s.tracer.timeNow()
- }
- s.observer.OnFinish(options)
- s.Lock()
- s.duration = options.FinishTime.Sub(s.startTime)
- s.Unlock()
- if !s.isSamplingFinalized() {
- decision := s.tracer.sampler.OnFinishSpan(s)
- s.applySamplingDecision(decision, true)
- }
- if s.context.IsSampled() {
- s.Lock()
- s.fixLogsIfDropped()
- if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
- // Note: bulk logs are not subject to maxLogsPerSpan limit
- if options.LogRecords != nil {
- s.logs = append(s.logs, options.LogRecords...)
- }
- for _, ld := range options.BulkLogData {
- s.logs = append(s.logs, ld.ToLogRecord())
- }
- }
- s.Unlock()
- }
- // call reportSpan even for non-sampled traces, to return span to the pool
- // and update metrics counter
- s.tracer.reportSpan(s)
-}
-
-// Context implements opentracing.Span API
-func (s *Span) Context() opentracing.SpanContext {
- s.Lock()
- defer s.Unlock()
- return s.context
-}
-
-// Tracer implements opentracing.Span API
-func (s *Span) Tracer() opentracing.Tracer {
- return s.tracer
-}
-
-func (s *Span) String() string {
- s.RLock()
- defer s.RUnlock()
- return s.context.String()
-}
-
-// OperationName allows retrieving current operation name.
-func (s *Span) OperationName() string {
- s.RLock()
- defer s.RUnlock()
- return s.operationName
-}
-
-// Retain increases object counter to increase the lifetime of the object
-func (s *Span) Retain() *Span {
- atomic.AddInt32(&s.referenceCounter, 1)
- return s
-}
-
-// Release decrements the object counter and returns the span to the
-// allocator when the counter drops below zero.
-func (s *Span) Release() {
- if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
- s.tracer.spanAllocator.Put(s)
- }
-}
-
-// reset span state and release unused data
-func (s *Span) reset() {
- s.firstInProcess = false
- s.context = emptyContext
- s.operationName = ""
- s.tracer = nil
- s.startTime = time.Time{}
- s.duration = 0
- s.observer = nil
- atomic.StoreInt32(&s.referenceCounter, 0)
-
- // Note: To reuse memory we can save the pointers on the heap
- s.tags = s.tags[:0]
- s.logs = s.logs[:0]
- s.numDroppedLogs = 0
- s.references = s.references[:0]
-}
-
-func (s *Span) serviceName() string {
- return s.tracer.serviceName
-}
-
-func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
- if !decision.Retryable {
- s.context.samplingState.setFinal()
- }
- if decision.Sample {
- s.context.samplingState.setSampled()
- if len(decision.Tags) > 0 {
- if lock {
- s.Lock()
- defer s.Unlock()
- }
- for _, tag := range decision.Tags {
- s.appendTagNoLocking(tag.key, tag.value)
- }
- }
- }
-}
-
-// Span can be written to if it is sampled or the sampling decision has not been finalized.
-func (s *Span) isWriteable() bool {
- state := s.context.samplingState
- return !state.isFinal() || state.isSampled()
-}
-
-func (s *Span) isSamplingFinalized() bool {
- return s.context.samplingState.isFinal()
-}
-
-// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
-// The behavior of setSamplingPriority is surprising
-// If noDebugFlagOnForcedSampling is set
-// setSamplingPriority(span, 1) always sets only flagSampled
-// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
-// setSamplingPriority(span, 1) sets both flagSampled and flagDebug
-// However,
-// setSamplingPriority(span, 0) always only resets flagSampled
-//
-// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
-// leave flagDebug set
-func setSamplingPriority(s *Span, value interface{}) bool {
- val, ok := value.(uint16)
- if !ok {
- return false
- }
- if val == 0 {
- s.context.samplingState.unsetSampled()
- s.context.samplingState.setFinal()
- return true
- }
- if s.tracer.options.noDebugFlagOnForcedSampling {
- s.context.samplingState.setSampled()
- s.context.samplingState.setFinal()
- return true
- } else if s.tracer.isDebugAllowed(s.operationName) {
- s.context.samplingState.setDebugAndSampled()
- s.context.samplingState.setFinal()
- return true
- }
- return false
-}
-
-// EnableFirehose enables firehose flag on the span context
-func EnableFirehose(s *Span) {
- s.Lock()
- defer s.Unlock()
- s.context.samplingState.setFirehose()
-}
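
When a span drops logs, the newer half of the slice is used as a circular buffer and later de-circularized with an in-place rotation. The rotation itself is the classic three-pointer algorithm; a self-contained sketch with illustrative data:

package main

import "fmt"

// rotate performs pos circular left shifts on buf in place, the same
// algorithm used by rotateLogBuffer above (cf. std::rotate).
func rotate(buf []int, pos int) {
	for first, middle, next := 0, pos, pos; first != middle; {
		buf[first], buf[next] = buf[next], buf[first]
		first++
		next++
		if next == len(buf) {
			next = middle
		} else if first == middle {
			middle = next
		}
	}
}

func main() {
	logs := []int{5, 6, 7, 1, 2, 3, 4} // wrapped: the oldest surviving entries start at index 3
	rotate(logs, 3)
	fmt.Println(logs) // [1 2 3 4 5 6 7]
}
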
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
deleted file mode 100644
index 6fe0cd0ce..000000000
--- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "sync"
-
-// SpanAllocator is an abstraction for managing span allocations
-type SpanAllocator interface {
- Get() *Span
- Put(*Span)
-}
-
-type syncPollSpanAllocator struct {
- spanPool sync.Pool
-}
-
-func newSyncPollSpanAllocator() SpanAllocator {
- return &syncPollSpanAllocator{
- spanPool: sync.Pool{New: func() interface{} {
- return &Span{}
- }},
- }
-}
-
-func (pool *syncPollSpanAllocator) Get() *Span {
- return pool.spanPool.Get().(*Span)
-}
-
-func (pool *syncPollSpanAllocator) Put(span *Span) {
- span.reset()
- pool.spanPool.Put(span)
-}
-
-type simpleSpanAllocator struct{}
-
-func (pool simpleSpanAllocator) Get() *Span {
- return &Span{}
-}
-
-func (pool simpleSpanAllocator) Put(span *Span) {
- // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
- // since finished spans are not reused, no need to reset them
- // span.reset()
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
deleted file mode 100644
index ae9d94a9a..000000000
--- a/vendor/github.com/uber/jaeger-client-go/span_context.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "sync"
-
- "go.uber.org/atomic"
-)
-
-const (
- flagSampled = 1
- flagDebug = 2
- flagFirehose = 8
-)
-
-var (
- errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
- errMalformedTracerStateString = errors.New("String does not match tracer state format")
-
- emptyContext = SpanContext{}
-)
-
-// TraceID represents unique 128bit identifier of a trace
-type TraceID struct {
- High, Low uint64
-}
-
-// SpanID represents unique 64bit identifier of a span
-type SpanID uint64
-
-// SpanContext represents propagated span identity and state
-type SpanContext struct {
- // traceID represents globally unique ID of the trace.
- // Usually generated as a random number.
- traceID TraceID
-
- // spanID represents span ID that must be unique within its trace,
- // but does not have to be globally unique.
- spanID SpanID
-
- // parentID refers to the ID of the parent span.
- // Should be 0 if the current span is a root span.
- parentID SpanID
-
-	// Distributed Context baggage. This is a snapshot in time.
- baggage map[string]string
-
- // debugID can be set to some correlation ID when the context is being
- // extracted from a TextMap carrier.
- //
- // See JaegerDebugHeader in constants.go
- debugID string
-
- // samplingState is shared across all spans
- samplingState *samplingState
-
- // remote indicates that span context represents a remote parent
- remote bool
-}
-
-type samplingState struct {
- // Span context's state flags that are propagated across processes. Only lower 8 bits are used.
- // We use an int32 instead of byte to be able to use CAS operations.
- stateFlags atomic.Int32
-
- // When state is not final, sampling will be retried on other span write operations,
- // like SetOperationName / SetTag, and the spans will remain writable.
- final atomic.Bool
-
- // localRootSpan stores the SpanID of the first span created in this process for a given trace.
- localRootSpan SpanID
-
- // extendedState allows samplers to keep intermediate state.
- // The keys and values in this map are completely opaque: interface{} -> interface{}.
- extendedState sync.Map
-}
-
-func (s *samplingState) isLocalRootSpan(id SpanID) bool {
- return id == s.localRootSpan
-}
-
-func (s *samplingState) setFlag(newFlag int32) {
- swapped := false
- for !swapped {
- old := s.stateFlags.Load()
- swapped = s.stateFlags.CAS(old, old|newFlag)
- }
-}
-
-func (s *samplingState) unsetFlag(newFlag int32) {
- swapped := false
- for !swapped {
- old := s.stateFlags.Load()
- swapped = s.stateFlags.CAS(old, old&^newFlag)
- }
-}
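setFlag and unsetFlag above use a compare-and-swap retry loop so concurrent writers can each OR in (or clear) their bit without losing another writer's update. A standalone sketch of the same loop using the standard library's sync/atomic (the vendored code uses go.uber.org/atomic, which wraps the same primitive):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	flagSampled = 1
	flagDebug   = 2
)

func setFlag(state *int32, newFlag int32) {
	for {
		old := atomic.LoadInt32(state)
		if atomic.CompareAndSwapInt32(state, old, old|newFlag) {
			return
		}
	}
}

func main() {
	var state int32
	var wg sync.WaitGroup
	for _, f := range []int32{flagSampled, flagDebug} {
		wg.Add(1)
		go func(f int32) { defer wg.Done(); setFlag(&state, f) }(f)
	}
	wg.Wait()
	fmt.Printf("flags=%b\n", state) // both bits survive the race: 11
}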
-
-func (s *samplingState) setSampled() {
- s.setFlag(flagSampled)
-}
-
-func (s *samplingState) unsetSampled() {
- s.unsetFlag(flagSampled)
-}
-
-func (s *samplingState) setDebugAndSampled() {
- s.setFlag(flagDebug | flagSampled)
-}
-
-func (s *samplingState) setFirehose() {
- s.setFlag(flagFirehose)
-}
-
-func (s *samplingState) setFlags(flags byte) {
- s.stateFlags.Store(int32(flags))
-}
-
-func (s *samplingState) setFinal() {
- s.final.Store(true)
-}
-
-func (s *samplingState) flags() byte {
- return byte(s.stateFlags.Load())
-}
-
-func (s *samplingState) isSampled() bool {
- return s.stateFlags.Load()&flagSampled == flagSampled
-}
-
-func (s *samplingState) isDebug() bool {
- return s.stateFlags.Load()&flagDebug == flagDebug
-}
-
-func (s *samplingState) isFirehose() bool {
- return s.stateFlags.Load()&flagFirehose == flagFirehose
-}
-
-func (s *samplingState) isFinal() bool {
- return s.final.Load()
-}
-
-func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
- if value, ok := s.extendedState.Load(key); ok {
- return value
- }
- value := initValue()
- value, _ = s.extendedState.LoadOrStore(key, value)
- return value
-}
-
-// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
-func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
- for k, v := range c.baggage {
- if !handler(k, v) {
- break
- }
- }
-}
-
-// IsSampled returns whether this trace was chosen for permanent storage
-// by the sampling mechanism of the tracer.
-func (c SpanContext) IsSampled() bool {
- return c.samplingState.isSampled()
-}
-
-// IsDebug indicates whether sampling was explicitly requested by the service.
-func (c SpanContext) IsDebug() bool {
- return c.samplingState.isDebug()
-}
-
-// IsSamplingFinalized indicates whether the sampling decision has been finalized.
-func (c SpanContext) IsSamplingFinalized() bool {
- return c.samplingState.isFinal()
-}
-
-// IsFirehose indicates whether the firehose flag was set
-func (c SpanContext) IsFirehose() bool {
- return c.samplingState.isFirehose()
-}
-
-// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
-// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
-func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
- return c.samplingState.extendedStateForKey(key, initValue)
-}
-
-// IsValid indicates whether this context actually represents a valid trace.
-func (c SpanContext) IsValid() bool {
- return c.traceID.IsValid() && c.spanID != 0
-}
-
-// SetFirehose enables firehose mode for this trace.
-func (c SpanContext) SetFirehose() {
- c.samplingState.setFirehose()
-}
-
-func (c SpanContext) String() string {
- var flags int32
- if c.samplingState != nil {
- flags = c.samplingState.stateFlags.Load()
- }
- if c.traceID.High == 0 {
- return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
- }
- return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
-}
-
-// ContextFromString reconstructs the Context encoded in a string
-func ContextFromString(value string) (SpanContext, error) {
- var context SpanContext
- if value == "" {
- return emptyContext, errEmptyTracerStateString
- }
- parts := strings.Split(value, ":")
- if len(parts) != 4 {
- return emptyContext, errMalformedTracerStateString
- }
- var err error
- if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
- return emptyContext, err
- }
- if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
- return emptyContext, err
- }
- if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
- return emptyContext, err
- }
- flags, err := strconv.ParseUint(parts[3], 10, 8)
- if err != nil {
- return emptyContext, err
- }
- context.samplingState = &samplingState{}
- context.samplingState.setFlags(byte(flags))
- return context, nil
-}
-
-// TraceID returns the trace ID of this span context
-func (c SpanContext) TraceID() TraceID {
- return c.traceID
-}
-
-// SpanID returns the span ID of this span context
-func (c SpanContext) SpanID() SpanID {
- return c.spanID
-}
-
-// ParentID returns the parent span ID of this span context
-func (c SpanContext) ParentID() SpanID {
- return c.parentID
-}
-
-// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
-func (c SpanContext) Flags() byte {
- return c.samplingState.flags()
-}
-
-// NewSpanContext creates a new instance of SpanContext
-func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
- samplingState := &samplingState{}
- if sampled {
- samplingState.setSampled()
- }
-
- return SpanContext{
- traceID: traceID,
- spanID: spanID,
- parentID: parentID,
- samplingState: samplingState,
- baggage: baggage}
-}
-
-// CopyFrom copies data from ctx into this context, including span identity and baggage.
-// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
-func (c *SpanContext) CopyFrom(ctx *SpanContext) {
- c.traceID = ctx.traceID
- c.spanID = ctx.spanID
- c.parentID = ctx.parentID
- c.samplingState = ctx.samplingState
- if l := len(ctx.baggage); l > 0 {
- c.baggage = make(map[string]string, l)
- for k, v := range ctx.baggage {
- c.baggage[k] = v
- }
- } else {
- c.baggage = nil
- }
-}
-
-// WithBaggageItem creates a new context with an extra baggage item.
-func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
- var newBaggage map[string]string
- if c.baggage == nil {
- newBaggage = map[string]string{key: value}
- } else {
- newBaggage = make(map[string]string, len(c.baggage)+1)
- for k, v := range c.baggage {
- newBaggage[k] = v
- }
- newBaggage[key] = value
- }
- // Use positional parameters so the compiler will help catch new fields.
- return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
-}
-
-// isDebugIDContainerOnly returns true when the instance of the context is only
-// used to return the debug/correlation ID from extract() method. This happens
-// in the situation when "jaeger-debug-id" header is passed in the carrier to
-// the extract() method, but the request otherwise has no span context in it.
-// Previously this would've returned opentracing.ErrSpanContextNotFound from the
-// extract method, but now it returns a dummy context with only debugID filled in.
-//
-// See JaegerDebugHeader in constants.go
-// See TextMapPropagator#Extract
-func (c *SpanContext) isDebugIDContainerOnly() bool {
- return !c.traceID.IsValid() && c.debugID != ""
-}
-
-// ------- TraceID -------
-
-func (t TraceID) String() string {
- if t.High == 0 {
- return fmt.Sprintf("%x", t.Low)
- }
- return fmt.Sprintf("%x%016x", t.High, t.Low)
-}
-
-// TraceIDFromString creates a TraceID from a hexadecimal string
-func TraceIDFromString(s string) (TraceID, error) {
- var hi, lo uint64
- var err error
- if len(s) > 32 {
- return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
- } else if len(s) > 16 {
- hiLen := len(s) - 16
- if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
- return TraceID{}, err
- }
- if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
- return TraceID{}, err
- }
- } else {
- if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
- return TraceID{}, err
- }
- }
- return TraceID{High: hi, Low: lo}, nil
-}
-
-// IsValid checks if the trace ID is valid, i.e. not zero.
-func (t TraceID) IsValid() bool {
- return t.High != 0 || t.Low != 0
-}
-
-// ------- SpanID -------
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%x", uint64(s))
-}
-
-// SpanIDFromString creates a SpanID from a hexadecimal string
-func SpanIDFromString(s string) (SpanID, error) {
- if len(s) > 16 {
- return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
- }
- id, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return SpanID(0), err
- }
- return SpanID(id), nil
-}
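A standalone sketch of the trace-context wire format parsed by ContextFromString above, "traceID:spanID:parentID:flags", where the IDs are hex (a trace ID longer than 16 hex characters is split into high and low 64-bit words) and the flags field is parsed as a decimal byte. The sample value below is illustrative, not taken from a real trace.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	value := "01020304050607080102030405060708:0102030405060708:0:1"
	parts := strings.Split(value, ":")
	hi, _ := strconv.ParseUint(parts[0][:len(parts[0])-16], 16, 64)
	lo, _ := strconv.ParseUint(parts[0][len(parts[0])-16:], 16, 64)
	spanID, _ := strconv.ParseUint(parts[1], 16, 64)
	flags, _ := strconv.ParseUint(parts[3], 10, 8)
	fmt.Printf("trace=%016x%016x span=%016x sampled=%v\n",
		hi, lo, spanID, flags&1 == 1)
}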
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
deleted file mode 100644
index 1f79c1255..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package baggage
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type BaggageRestrictionManager interface {
- // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-	// Usually, baggageRestrictions apply to all services; however, there may be situations
- // where a baggageKey might only be allowed to be set by a specific service.
- //
- // Parameters:
- // - ServiceName
- GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
-}
-
-type BaggageRestrictionManagerClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
-}
-
-func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
- return &BaggageRestrictionManagerClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
-}
-
-func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
- return &BaggageRestrictionManagerClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
-}
-
-// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-// Usually, baggageRestrictions apply to all services; however, there may be situations
-// where a baggageKey might only be allowed to be set by a specific service.
-//
-// Parameters:
-// - ServiceName
-func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
- if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
- return
- }
- return p.recvGetBaggageRestrictions()
-}
-
-func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
- ServiceName: serviceName,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
-}
-
-func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "getBaggageRestrictions" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
- return
- }
- if mTypeId == thrift.EXCEPTION {
- error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error1 error
- error1, err = error0.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error1
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
- return
- }
- result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
- if err = result.Read(iprot); err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- value = result.GetSuccess()
- return
-}
-
-type BaggageRestrictionManagerProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler BaggageRestrictionManager
-}
-
-func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
-
- self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
- return self2
-}
-
-func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x3.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x3
-
-}
-
-type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
- handler BaggageRestrictionManager
-}
-
-func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
- var retval []*BaggageRestriction
- var err2 error
- if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
- oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - ServiceName
-type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
- ServiceName string `thrift:"serviceName,1" json:"serviceName"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
- return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
- return p.ServiceName
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
- }
- if err := oprot.WriteString(string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
- }
- return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
- Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
- return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
-}
-
-var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
- return p.Success
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if err := p.readField0(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BaggageRestriction, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i++ {
- _elem4 := &BaggageRestriction{}
- if err := _elem4.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Success = append(p.Success, _elem4)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField0(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
deleted file mode 100644
index ed35ce9ab..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package baggage
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-func init() {
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
deleted file mode 100644
index 7888892f6..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package baggage
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-// Attributes:
-// - BaggageKey
-// - MaxValueLength
-type BaggageRestriction struct {
- BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"`
- MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"`
-}
-
-func NewBaggageRestriction() *BaggageRestriction {
- return &BaggageRestriction{}
-}
-
-func (p *BaggageRestriction) GetBaggageKey() string {
- return p.BaggageKey
-}
-
-func (p *BaggageRestriction) GetMaxValueLength() int32 {
- return p.MaxValueLength
-}
-func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetBaggageKey bool = false
- var issetMaxValueLength bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetBaggageKey = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetMaxValueLength = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetBaggageKey {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
- }
- if !issetMaxValueLength {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
- }
- return nil
-}
-
-func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.BaggageKey = v
- }
- return nil
-}
-
-func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.MaxValueLength = v
- }
- return nil
-}
-
-func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
- }
- if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
- }
- return err
-}
-
-func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
- }
- return err
-}
-
-func (p *BaggageRestriction) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BaggageRestriction(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
deleted file mode 100644
index 0f6e3a884..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package sampling
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-func init() {
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
deleted file mode 100644
index 33179cfeb..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package sampling
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type SamplingManager interface {
- // Parameters:
- // - ServiceName
- GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
-}
-
-type SamplingManagerClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
-}
-
-func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
- return &SamplingManagerClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
-}
-
-func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
- return &SamplingManagerClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
-}
-
-// Parameters:
-// - ServiceName
-func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
- if err = p.sendGetSamplingStrategy(serviceName); err != nil {
- return
- }
- return p.recvGetSamplingStrategy()
-}
-
-func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := SamplingManagerGetSamplingStrategyArgs{
- ServiceName: serviceName,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
-}
-
-func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "getSamplingStrategy" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
- return
- }
- if mTypeId == thrift.EXCEPTION {
- error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error2 error
- error2, err = error1.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error2
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
- return
- }
- result := SamplingManagerGetSamplingStrategyResult{}
- if err = result.Read(iprot); err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- value = result.GetSuccess()
- return
-}
-
-type SamplingManagerProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler SamplingManager
-}
-
-func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
-
- self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
- return self3
-}
-
-func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x4.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x4
-
-}
-
-type samplingManagerProcessorGetSamplingStrategy struct {
- handler SamplingManager
-}
-
-func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := SamplingManagerGetSamplingStrategyArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- result := SamplingManagerGetSamplingStrategyResult{}
- var retval *SamplingStrategyResponse
- var err2 error
- if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
- oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - ServiceName
-type SamplingManagerGetSamplingStrategyArgs struct {
- ServiceName string `thrift:"serviceName,1" json:"serviceName"`
-}
-
-func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
- return &SamplingManagerGetSamplingStrategyArgs{}
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
- return p.ServiceName
-}
-func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
- }
- if err := oprot.WriteString(string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
- }
- return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type SamplingManagerGetSamplingStrategyResult struct {
- Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
- return &SamplingManagerGetSamplingStrategyResult{}
-}
-
-var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
-
-func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
- if !p.IsSetSuccess() {
- return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
- }
- return p.Success
-}
-func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if err := p.readField0(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
- p.Success = &SamplingStrategyResponse{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField0(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
-}
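The generated Processor types above all dispatch on the RPC method name read from the wire: look the name up in processorMap and fall back to an "Unknown function" exception. A standalone sketch of that dispatch pattern (stand-in types, not the vendored thrift API):

package main

import "fmt"

type handler func(arg string) (string, error)

type processor struct{ methods map[string]handler }

func (p *processor) process(name, arg string) (string, error) {
	// Like SamplingManagerProcessor.Process: known names go to their
	// registered handler, unknown names become an error reply.
	if h, ok := p.methods[name]; ok {
		return h(arg)
	}
	return "", fmt.Errorf("Unknown function %s", name)
}

func main() {
	p := &processor{methods: map[string]handler{
		"getSamplingStrategy": func(svc string) (string, error) {
			return "PROBABILISTIC", nil
		},
	}}
	fmt.Println(p.process("getSamplingStrategy", "podman"))
	fmt.Println(p.process("bogus", "podman"))
}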
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
deleted file mode 100644
index 9abaf0542..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
+++ /dev/null
@@ -1,873 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package sampling
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type SamplingStrategyType int64
-
-const (
- SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
- SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
-)
-
-func (p SamplingStrategyType) String() string {
- switch p {
- case SamplingStrategyType_PROBABILISTIC:
- return "PROBABILISTIC"
- case SamplingStrategyType_RATE_LIMITING:
- return "RATE_LIMITING"
- }
- return "<UNSET>"
-}
-
-func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
- switch s {
- case "PROBABILISTIC":
- return SamplingStrategyType_PROBABILISTIC, nil
- case "RATE_LIMITING":
- return SamplingStrategyType_RATE_LIMITING, nil
- }
- return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
-}
-
-func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
-
-func (p SamplingStrategyType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
- q, err := SamplingStrategyTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-// Attributes:
-// - SamplingRate
-type ProbabilisticSamplingStrategy struct {
- SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
-}
-
-func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
- return &ProbabilisticSamplingStrategy{}
-}
-
-func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
- return p.SamplingRate
-}
-func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetSamplingRate bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetSamplingRate = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetSamplingRate {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
- }
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.SamplingRate = v
- }
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
- }
- if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
- }
- return err
-}
-
-func (p *ProbabilisticSamplingStrategy) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-// - MaxTracesPerSecond
-type RateLimitingSamplingStrategy struct {
- MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
-}
-
-func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
- return &RateLimitingSamplingStrategy{}
-}
-
-func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
- return p.MaxTracesPerSecond
-}
-func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetMaxTracesPerSecond bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetMaxTracesPerSecond = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetMaxTracesPerSecond {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
- }
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.MaxTracesPerSecond = v
- }
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
- }
- if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
- }
- return err
-}
-
-func (p *RateLimitingSamplingStrategy) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-// - Operation
-// - ProbabilisticSampling
-type OperationSamplingStrategy struct {
- Operation string `thrift:"operation,1,required" json:"operation"`
- ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
-}
-
-func NewOperationSamplingStrategy() *OperationSamplingStrategy {
- return &OperationSamplingStrategy{}
-}
-
-func (p *OperationSamplingStrategy) GetOperation() string {
- return p.Operation
-}
-
-var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-
-func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
- if !p.IsSetProbabilisticSampling() {
- return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
- }
- return p.ProbabilisticSampling
-}
-func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
- return p.ProbabilisticSampling != nil
-}
-
-func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOperation bool = false
- var issetProbabilisticSampling bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetOperation = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetProbabilisticSampling = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOperation {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
- }
- if !issetProbabilisticSampling {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Operation = v
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
- p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
- if err := p.ProbabilisticSampling.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
- }
- if err := oprot.WriteString(string(p.Operation)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
- }
- return err
-}
-
-func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
- }
- if err := p.ProbabilisticSampling.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
- }
- return err
-}
-
-func (p *OperationSamplingStrategy) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-// - DefaultSamplingProbability
-// - DefaultLowerBoundTracesPerSecond
-// - PerOperationStrategies
-// - DefaultUpperBoundTracesPerSecond
-type PerOperationSamplingStrategies struct {
- DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
- DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
- PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
- DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
-}
-
-func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
- return &PerOperationSamplingStrategies{}
-}
-
-func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
- return p.DefaultSamplingProbability
-}
-
-func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
- return p.DefaultLowerBoundTracesPerSecond
-}
-
-func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
- return p.PerOperationStrategies
-}
-
-var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
-
-func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
- if !p.IsSetDefaultUpperBoundTracesPerSecond() {
- return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
- }
- return *p.DefaultUpperBoundTracesPerSecond
-}
-func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
- return p.DefaultUpperBoundTracesPerSecond != nil
-}
-
-func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetDefaultSamplingProbability bool = false
- var issetDefaultLowerBoundTracesPerSecond bool = false
- var issetPerOperationStrategies bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetDefaultSamplingProbability = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetDefaultLowerBoundTracesPerSecond = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- issetPerOperationStrategies = true
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetDefaultSamplingProbability {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
- }
- if !issetDefaultLowerBoundTracesPerSecond {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
- }
- if !issetPerOperationStrategies {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.DefaultSamplingProbability = v
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.DefaultLowerBoundTracesPerSecond = v
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*OperationSamplingStrategy, 0, size)
- p.PerOperationStrategies = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &OperationSamplingStrategy{}
- if err := _elem0.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.DefaultUpperBoundTracesPerSecond = &v
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
- }
- if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
- }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
- }
- if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
- }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.PerOperationStrategies {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
- }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetDefaultUpperBoundTracesPerSecond() {
- if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
- }
- if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
- }
- }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
-}
-
-// Attributes:
-// - StrategyType
-// - ProbabilisticSampling
-// - RateLimitingSampling
-// - OperationSampling
-type SamplingStrategyResponse struct {
- StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"`
- ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
- RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
- OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
-}
-
-func NewSamplingStrategyResponse() *SamplingStrategyResponse {
- return &SamplingStrategyResponse{}
-}
-
-func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
- return p.StrategyType
-}
-
-var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-
-func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
- if !p.IsSetProbabilisticSampling() {
- return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
- }
- return p.ProbabilisticSampling
-}
-
-var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
-
-func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
- if !p.IsSetRateLimitingSampling() {
- return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
- }
- return p.RateLimitingSampling
-}
-
-var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
-
-func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
- if !p.IsSetOperationSampling() {
- return SamplingStrategyResponse_OperationSampling_DEFAULT
- }
- return p.OperationSampling
-}
-func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
- return p.ProbabilisticSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
- return p.RateLimitingSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
- return p.OperationSampling != nil
-}
-
-func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetStrategyType bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetStrategyType = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetStrategyType {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- temp := SamplingStrategyType(v)
- p.StrategyType = temp
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
- p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
- if err := p.ProbabilisticSampling.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
- p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
- if err := p.RateLimitingSampling.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
- p.OperationSampling = &PerOperationSamplingStrategies{}
- if err := p.OperationSampling.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
- if p.IsSetProbabilisticSampling() {
- if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
- }
- if err := p.ProbabilisticSampling.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
- }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetRateLimitingSampling() {
- if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
- }
- if err := p.RateLimitingSampling.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
- }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetOperationSampling() {
- if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
- }
- if err := p.OperationSampling.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
- }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
deleted file mode 100644
index 477c6eae3..000000000
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ /dev/null
@@ -1,491 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "io"
- "math/rand"
- "os"
- "reflect"
- "strconv"
- "sync"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- "github.com/uber/jaeger-client-go/internal/throttler"
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// Tracer implements opentracing.Tracer.
-type Tracer struct {
- serviceName string
- hostIPv4 uint32 // this is for zipkin endpoint conversion
-
- sampler SamplerV2
- reporter Reporter
- metrics Metrics
- logger log.DebugLogger
-
- timeNow func() time.Time
- randomNumber func() uint64
-
- options struct {
- gen128Bit bool // whether to generate 128bit trace IDs
- zipkinSharedRPCSpan bool
- highTraceIDGenerator func() uint64 // custom high trace ID generator
- maxTagValueLength int
- noDebugFlagOnForcedSampling bool
- maxLogsPerSpan int
- // more options to come
- }
- // allocator of Span objects
- spanAllocator SpanAllocator
-
- injectors map[interface{}]Injector
- extractors map[interface{}]Extractor
-
- observer compositeObserver
-
- tags []Tag
- process Process
-
- baggageRestrictionManager baggage.RestrictionManager
- baggageSetter *baggageSetter
-
- debugThrottler throttler.Throttler
-}
-
-// NewTracer creates Tracer implementation that reports tracing to Jaeger.
-// The returned io.Closer can be used in shutdown hooks to ensure that the internal
-// queue of the Reporter is drained and all buffered spans are submitted to collectors.
-// TODO (breaking change) return *Tracer only, without closer.
-func NewTracer(
- serviceName string,
- sampler Sampler,
- reporter Reporter,
- options ...TracerOption,
-) (opentracing.Tracer, io.Closer) {
- t := &Tracer{
- serviceName: serviceName,
- sampler: samplerV1toV2(sampler),
- reporter: reporter,
- injectors: make(map[interface{}]Injector),
- extractors: make(map[interface{}]Extractor),
- metrics: *NewNullMetrics(),
- spanAllocator: simpleSpanAllocator{},
- }
-
- for _, option := range options {
- option(t)
- }
-
- // register default injectors/extractors unless they are already provided via options
- textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
- t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
-
- httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
- t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
-
- binaryPropagator := NewBinaryPropagator(t)
- t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
-
- // TODO remove after TChannel supports OpenTracing
- interopPropagator := &jaegerTraceContextPropagator{tracer: t}
- t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
-
- zipkinPropagator := &zipkinPropagator{tracer: t}
- t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
-
- if t.baggageRestrictionManager != nil {
- t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
- } else {
- t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
- }
- if t.debugThrottler == nil {
- t.debugThrottler = throttler.DefaultThrottler{}
- }
-
- if t.randomNumber == nil {
- seedGenerator := utils.NewRand(time.Now().UnixNano())
- pool := sync.Pool{
- New: func() interface{} {
- return rand.NewSource(seedGenerator.Int63())
- },
- }
-
- t.randomNumber = func() uint64 {
- generator := pool.Get().(rand.Source)
- number := uint64(generator.Int63())
- pool.Put(generator)
- return number
- }
- }
- if t.timeNow == nil {
- t.timeNow = time.Now
- }
- if t.logger == nil {
- t.logger = log.NullLogger
- }
- // Set tracer-level tags
- t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
- if hostname, err := os.Hostname(); err == nil {
- t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
- }
- if ipval, ok := t.getTag(TracerIPTagKey); ok {
- ipv4, err := utils.ParseIPToUint32(ipval.(string))
- if err != nil {
- t.hostIPv4 = 0
- t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
- } else {
- t.hostIPv4 = ipv4
- }
- } else if ip, err := utils.HostIP(); err == nil {
- t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
- t.hostIPv4 = utils.PackIPAsUint32(ip)
- } else {
- t.logger.Error("Unable to determine this host's IP address: " + err.Error())
- }
-
- if t.options.gen128Bit {
- if t.options.highTraceIDGenerator == nil {
- t.options.highTraceIDGenerator = t.randomNumber
- }
- } else if t.options.highTraceIDGenerator != nil {
- t.logger.Error("Overriding high trace ID generator but not generating " +
- "128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
- }
- if t.options.maxTagValueLength == 0 {
- t.options.maxTagValueLength = DefaultMaxTagValueLength
- }
- t.process = Process{
- Service: serviceName,
- UUID: strconv.FormatUint(t.randomNumber(), 16),
- Tags: t.tags,
- }
- if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
- throttler.SetProcess(t.process)
- }
-
- return t, t
-}
-
-// addCodec adds registers injector and extractor for given propagation format if not already defined.
-func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
- if _, ok := t.injectors[format]; !ok {
- t.injectors[format] = injector
- }
- if _, ok := t.extractors[format]; !ok {
- t.extractors[format] = extractor
- }
-}
-
-// StartSpan implements StartSpan() method of opentracing.Tracer.
-func (t *Tracer) StartSpan(
- operationName string,
- options ...opentracing.StartSpanOption,
-) opentracing.Span {
- sso := opentracing.StartSpanOptions{}
- for _, o := range options {
- o.Apply(&sso)
- }
- return t.startSpanWithOptions(operationName, sso)
-}
-
-func (t *Tracer) startSpanWithOptions(
- operationName string,
- options opentracing.StartSpanOptions,
-) opentracing.Span {
- if options.StartTime.IsZero() {
- options.StartTime = t.timeNow()
- }
-
- // Predicate whether the given span context is an empty reference
- // or may be used as parent / debug ID / baggage items source
- isEmptyReference := func(ctx SpanContext) bool {
- return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
- }
-
- var references []Reference
- var parent SpanContext
- var hasParent bool // need this because `parent` is a value, not reference
- var ctx SpanContext
- var isSelfRef bool
- for _, ref := range options.References {
- ctxRef, ok := ref.ReferencedContext.(SpanContext)
- if !ok {
- t.logger.Error(fmt.Sprintf(
- "Reference contains invalid type of SpanReference: %s",
- reflect.ValueOf(ref.ReferencedContext)))
- continue
- }
- if isEmptyReference(ctxRef) {
- continue
- }
-
- if ref.Type == selfRefType {
- isSelfRef = true
- ctx = ctxRef
- continue
- }
-
- if ctxRef.IsValid() {
- // we don't want empty context that contains only debug-id or baggage
- references = append(references, Reference{Type: ref.Type, Context: ctxRef})
- }
-
- if !hasParent {
- parent = ctxRef
- hasParent = ref.Type == opentracing.ChildOfRef
- }
- }
- if !hasParent && !isEmptyReference(parent) {
- // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
- // the FollowFromRef as the parent
- hasParent = true
- }
-
- rpcServer := false
- if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
- rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
- }
-
- var internalTags []Tag
- newTrace := false
- if !isSelfRef {
- if !hasParent || !parent.IsValid() {
- newTrace = true
- ctx.traceID.Low = t.randomID()
- if t.options.gen128Bit {
- ctx.traceID.High = t.options.highTraceIDGenerator()
- }
- ctx.spanID = SpanID(ctx.traceID.Low)
- ctx.parentID = 0
- ctx.samplingState = &samplingState{
- localRootSpan: ctx.spanID,
- }
- if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
- ctx.samplingState.setDebugAndSampled()
- internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
- }
- } else {
- ctx.traceID = parent.traceID
- if rpcServer && t.options.zipkinSharedRPCSpan {
- // Support Zipkin's one-span-per-RPC model
- ctx.spanID = parent.spanID
- ctx.parentID = parent.parentID
- } else {
- ctx.spanID = SpanID(t.randomID())
- ctx.parentID = parent.spanID
- }
- ctx.samplingState = parent.samplingState
- if parent.remote {
- ctx.samplingState.setFinal()
- ctx.samplingState.localRootSpan = ctx.spanID
- }
- }
- if hasParent {
- // copy baggage items
- if l := len(parent.baggage); l > 0 {
- ctx.baggage = make(map[string]string, len(parent.baggage))
- for k, v := range parent.baggage {
- ctx.baggage[k] = v
- }
- }
- }
- }
-
- sp := t.newSpan()
- sp.context = ctx
- sp.tracer = t
- sp.operationName = operationName
- sp.startTime = options.StartTime
- sp.duration = 0
- sp.references = references
- sp.firstInProcess = rpcServer || sp.context.parentID == 0
-
- if !sp.isSamplingFinalized() {
- decision := t.sampler.OnCreateSpan(sp)
- sp.applySamplingDecision(decision, false)
- }
- sp.observer = t.observer.OnStartSpan(sp, operationName, options)
-
- if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
- if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
- sp.tags = make([]Tag, 0, tagsTotalLength)
- }
- sp.tags = append(sp.tags, internalTags...)
- for k, v := range options.Tags {
- sp.setTagInternal(k, v, false)
- }
- }
- t.emitNewSpanMetrics(sp, newTrace)
- return sp
-}
-
-// Inject implements Inject() method of opentracing.Tracer
-func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
- c, ok := ctx.(SpanContext)
- if !ok {
- return opentracing.ErrInvalidSpanContext
- }
- if injector, ok := t.injectors[format]; ok {
- return injector.Inject(c, carrier)
- }
- return opentracing.ErrUnsupportedFormat
-}
-
-// Extract implements Extract() method of opentracing.Tracer
-func (t *Tracer) Extract(
- format interface{},
- carrier interface{},
-) (opentracing.SpanContext, error) {
- if extractor, ok := t.extractors[format]; ok {
- spanCtx, err := extractor.Extract(carrier)
- if err != nil {
- return nil, err // ensure returned spanCtx is nil
- }
- spanCtx.remote = true
- return spanCtx, nil
- }
- return nil, opentracing.ErrUnsupportedFormat
-}
-
-// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
-func (t *Tracer) Close() error {
- t.logger.Debugf("closing tracer")
- t.reporter.Close()
- t.sampler.Close()
- if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
- _ = mgr.Close()
- }
- if throttler, ok := t.debugThrottler.(io.Closer); ok {
- _ = throttler.Close()
- }
- return nil
-}
-
-// Tags returns a slice of tracer-level tags.
-func (t *Tracer) Tags() []opentracing.Tag {
- tags := make([]opentracing.Tag, len(t.tags))
- for i, tag := range t.tags {
- tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
- }
- return tags
-}
-
-// getTag returns the value of specific tag, if not exists, return nil.
-// TODO only used by tests, move there.
-func (t *Tracer) getTag(key string) (interface{}, bool) {
- for _, tag := range t.tags {
- if tag.key == key {
- return tag.value, true
- }
- }
- return nil, false
-}
-
-// newSpan returns an instance of a clean Span object.
-// If options.PoolSpans is true, the spans are retrieved from an object pool.
-func (t *Tracer) newSpan() *Span {
- return t.spanAllocator.Get()
-}
-
-// emitNewSpanMetrics generates metrics on the number of started spans and traces.
-// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the
-// server-side RPC span has the exact same trace/span/parent IDs as the
-// calling client-side span, but obviously the server side span is
-// no longer a root span of the trace.
-func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
- if !sp.isSamplingFinalized() {
- t.metrics.SpansStartedDelayedSampling.Inc(1)
- if newTrace {
- t.metrics.TracesStartedDelayedSampling.Inc(1)
- }
- // joining a trace is not possible, because sampling decision inherited from upstream is final
- } else if sp.context.IsSampled() {
- t.metrics.SpansStartedSampled.Inc(1)
- if newTrace {
- t.metrics.TracesStartedSampled.Inc(1)
- } else if sp.firstInProcess {
- t.metrics.TracesJoinedSampled.Inc(1)
- }
- } else {
- t.metrics.SpansStartedNotSampled.Inc(1)
- if newTrace {
- t.metrics.TracesStartedNotSampled.Inc(1)
- } else if sp.firstInProcess {
- t.metrics.TracesJoinedNotSampled.Inc(1)
- }
- }
-}
-
-func (t *Tracer) reportSpan(sp *Span) {
- if !sp.isSamplingFinalized() {
- t.metrics.SpansFinishedDelayedSampling.Inc(1)
- } else if sp.context.IsSampled() {
- t.metrics.SpansFinishedSampled.Inc(1)
- } else {
- t.metrics.SpansFinishedNotSampled.Inc(1)
- }
-
- // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
- // and then Release() it when no longer needed.
- // Otherwise, the span may be reused for another trace and its data may be overwritten.
- if sp.context.IsSampled() {
- t.reporter.Report(sp)
- }
-
- sp.Release()
-}
-
-// randomID generates a random trace/span ID, using tracer.random() generator.
-// It never returns 0.
-func (t *Tracer) randomID() uint64 {
- val := t.randomNumber()
- for val == 0 {
- val = t.randomNumber()
- }
- return val
-}
-
-// (NB) span must hold the lock before making this call
-func (t *Tracer) setBaggage(sp *Span, key, value string) {
- t.baggageSetter.setBaggage(sp, key, value)
-}
-
-// (NB) span must hold the lock before making this call
-func (t *Tracer) isDebugAllowed(operation string) bool {
- return t.debugThrottler.IsAllowed(operation)
-}
-
-// Sampler returns the sampler given to the tracer at creation.
-func (t *Tracer) Sampler() SamplerV2 {
- return t.sampler
-}
-
-// SelfRef creates an opentracing compliant SpanReference from a jaeger
-// SpanContext. This is a factory function in order to encapsulate jaeger specific
-// types.
-func SelfRef(ctx SpanContext) opentracing.SpanReference {
- return opentracing.SpanReference{
- Type: selfRefType,
- ReferencedContext: ctx,
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
deleted file mode 100644
index f0734b772..000000000
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- "github.com/uber/jaeger-client-go/internal/throttler"
- "github.com/uber/jaeger-client-go/log"
-)
-
-// TracerOption is a function that sets some option on the tracer
-type TracerOption func(tracer *Tracer)
-
-// TracerOptions is a factory for all available TracerOption's
-var TracerOptions tracerOptions
-
-type tracerOptions struct{}
-
-// Metrics creates a TracerOption that initializes Metrics on the tracer,
-// which is used to emit statistics.
-func (tracerOptions) Metrics(m *Metrics) TracerOption {
- return func(tracer *Tracer) {
- tracer.metrics = *m
- }
-}
-
-// Logger creates a TracerOption that gives the tracer a Logger.
-func (tracerOptions) Logger(logger Logger) TracerOption {
- return func(tracer *Tracer) {
- tracer.logger = log.DebugLogAdapter(logger)
- }
-}
-
-func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
- return func(tracer *Tracer) {
- if headerKeys == nil {
- return
- }
- textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
- tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
-
- httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
- tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
- }
-}
-
-// TimeNow creates a TracerOption that gives the tracer a function
-// used to generate timestamps for spans.
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
- return func(tracer *Tracer) {
- tracer.timeNow = timeNow
- }
-}
-
-// RandomNumber creates a TracerOption that gives the tracer
-// a thread-safe random number generator function for generating trace IDs.
-func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
- return func(tracer *Tracer) {
- tracer.randomNumber = randomNumber
- }
-}
-
-// PoolSpans creates a TracerOption that tells the tracer whether it should use
-// an object pool to minimize span allocations.
-// This should be used with care, only if the service is not running any async tasks
-// that can access parent spans after those spans have been finished.
-func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
- return func(tracer *Tracer) {
- if poolSpans {
- tracer.spanAllocator = newSyncPollSpanAllocator()
- } else {
- tracer.spanAllocator = simpleSpanAllocator{}
- }
- }
-}
-
-// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
-// If not set, the factory method will obtain the current IP address.
-// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
-func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
- return func(tracer *Tracer) {
- tracer.hostIPv4 = hostIPv4
- }
-}
-
-func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
- return func(tracer *Tracer) {
- tracer.injectors[format] = injector
- }
-}
-
-func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
- return func(tracer *Tracer) {
- tracer.extractors[format] = extractor
- }
-}
-
-func (t tracerOptions) Observer(observer Observer) TracerOption {
- return t.ContribObserver(&oldObserver{obs: observer})
-}
-
-func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
- return func(tracer *Tracer) {
- tracer.observer.append(observer)
- }
-}
-
-func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.gen128Bit = gen128Bit
- }
-}
-
-func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
- }
-}
-
-func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.highTraceIDGenerator = highTraceIDGenerator
- }
-}
-
-func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.maxTagValueLength = maxTagValueLength
- }
-}
-
-// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
-// value). If a span has more logs than this value, logs are dropped as
-// necessary (and replaced with a log describing how many were dropped).
-//
-// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
-// half are the newest logs.
-func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.maxLogsPerSpan = maxLogsPerSpan
- }
-}
-
-func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
- }
-}
-
-func (tracerOptions) Tag(key string, value interface{}) TracerOption {
- return func(tracer *Tracer) {
- tracer.tags = append(tracer.tags, Tag{key: key, value: value})
- }
-}
-
-func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
- return func(tracer *Tracer) {
- tracer.baggageRestrictionManager = mgr
- }
-}
-
-func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
- return func(tracer *Tracer) {
- tracer.debugThrottler = throttler
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
deleted file mode 100644
index c5f5b1955..000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "io"
-)
-
-// Transport abstracts the method of sending spans out of process.
-// Implementations are NOT required to be thread-safe; the RemoteReporter
-// is expected to only call methods on the Transport from the same go-routine.
-type Transport interface {
- // Append converts the span to the wire representation and adds it
- // to sender's internal buffer. If the buffer exceeds its designated
- // size, the transport should call Flush() and return the number of spans
- // flushed, otherwise return 0. If error is returned, the returned number
- // of spans is treated as failed span, and reported to metrics accordingly.
- Append(span *Span) (int, error)
-
- // Flush submits the internal buffer to the remote server. It returns the
- // number of spans flushed. If error is returned, the returned number of
- // spans is treated as failed span, and reported to metrics accordingly.
- Flush() (int, error)
-
- io.Closer
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
deleted file mode 100644
index 6b961fb63..000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport defines various transports that can be used with
-// RemoteReporter to send spans out of process. Transport is responsible
-// for serializing the spans into a specific format suitable for sending
-// to the tracing backend. Examples may include Thrift over UDP, Thrift
-// or JSON over HTTP, Thrift over Kafka, etc.
-//
-// Implementations are NOT required to be thread-safe; the RemoteReporter
-// is expected to only call methods on the Transport from the same go-routine.
-package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
deleted file mode 100644
index bb7eb00c9..000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport/http.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/uber/jaeger-client-go/thrift"
-
- "github.com/uber/jaeger-client-go"
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
-)
-
-// Default timeout for http request in seconds
-const defaultHTTPTimeout = time.Second * 5
-
-// HTTPTransport implements Transport by forwarding spans to a http server.
-type HTTPTransport struct {
- url string
- client *http.Client
- batchSize int
- spans []*j.Span
- process *j.Process
- httpCredentials *HTTPBasicAuthCredentials
- headers map[string]string
-}
-
-// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
-type HTTPBasicAuthCredentials struct {
- username string
- password string
-}
-
-// HTTPOption sets a parameter for the HttpCollector
-type HTTPOption func(c *HTTPTransport)
-
-// HTTPTimeout sets maximum timeout for http request.
-func HTTPTimeout(duration time.Duration) HTTPOption {
- return func(c *HTTPTransport) { c.client.Timeout = duration }
-}
-
-// HTTPBatchSize sets the maximum batch size, after which a collect will be
-// triggered. The default batch size is 100 spans.
-func HTTPBatchSize(n int) HTTPOption {
- return func(c *HTTPTransport) { c.batchSize = n }
-}
-
-// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
-func HTTPBasicAuth(username string, password string) HTTPOption {
- return func(c *HTTPTransport) {
- c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
- }
-}
-
-// HTTPRoundTripper configures the underlying Transport on the *http.Client
-// that is used
-func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
- return func(c *HTTPTransport) {
- c.client.Transport = transport
- }
-}
-
-// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request
-func HTTPHeaders(headers map[string]string) HTTPOption {
- return func(c *HTTPTransport) {
- c.headers = headers
- }
-}
-
-// NewHTTPTransport returns a new HTTP-backend transport. url should be an http
-// url of the collector to handle POST request, typically something like:
-// http://hostname:14268/api/traces?format=jaeger.thrift
-func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
- c := &HTTPTransport{
- url: url,
- client: &http.Client{Timeout: defaultHTTPTimeout},
- batchSize: 100,
- spans: []*j.Span{},
- }
-
- for _, option := range options {
- option(c)
- }
- return c
-}
-
-// Append implements Transport.
-func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
- if c.process == nil {
- c.process = jaeger.BuildJaegerProcessThrift(span)
- }
- jSpan := jaeger.BuildJaegerThrift(span)
- c.spans = append(c.spans, jSpan)
- if len(c.spans) >= c.batchSize {
- return c.Flush()
- }
- return 0, nil
-}
-
-// Flush implements Transport.
-func (c *HTTPTransport) Flush() (int, error) {
- count := len(c.spans)
- if count == 0 {
- return 0, nil
- }
- err := c.send(c.spans)
- c.spans = c.spans[:0]
- return count, err
-}
-
-// Close implements Transport.
-func (c *HTTPTransport) Close() error {
- return nil
-}
-
-func (c *HTTPTransport) send(spans []*j.Span) error {
- batch := &j.Batch{
- Spans: spans,
- Process: c.process,
- }
- body, err := serializeThrift(batch)
- if err != nil {
- return err
- }
- req, err := http.NewRequest("POST", c.url, body)
- if err != nil {
- return err
- }
- req.Header.Set("Content-Type", "application/x-thrift")
- for k, v := range c.headers {
- req.Header.Set(k, v)
- }
-
- if c.httpCredentials != nil {
- req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
- }
-
- resp, err := c.client.Do(req)
- if err != nil {
- return err
- }
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- if resp.StatusCode >= http.StatusBadRequest {
- return fmt.Errorf("error from collector: %d", resp.StatusCode)
- }
- return nil
-}
-
-func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
- t := thrift.NewTMemoryBuffer()
- p := thrift.NewTBinaryProtocolTransport(t)
- if err := obj.Write(p); err != nil {
- return nil, err
- }
- return t.Buffer, nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
deleted file mode 100644
index 5734819ab..000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "errors"
- "fmt"
-
- "github.com/uber/jaeger-client-go/internal/reporterstats"
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/thrift"
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// Empirically obtained constant for how many bytes in the message are used for envelope.
-// The total datagram size is:
-// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
-//
-// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
-// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
-//
-// There is a unit test `TestEmitBatchOverhead` that validates this number, it fails at <68.
-const emitBatchOverhead = 70
-
-var errSpanTooLarge = errors.New("span is too large")
-
-type udpSender struct {
- client *utils.AgentClientUDP
- maxPacketSize int // max size of datagram in bytes
- maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram
- byteBufferSize int // current number of span bytes accumulated in the buffer
- spanBuffer []*j.Span // spans buffered before a flush
- thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
- thriftProtocol thrift.TProtocol
- process *j.Process
- processByteSize int
-
- // reporterStats provides access to stats that are only known to Reporter
- reporterStats reporterstats.ReporterStats
-
- // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
- // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
- batchSeqNo int64
- tooLargeDroppedSpans int64
- failedToEmitSpans int64
-}
-
-// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
-// be passed to NewUDPTransportWithParams.
-type UDPTransportParams struct {
- utils.AgentClientUDPParams
-}
-
-// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent.
-// TODO: (breaking change) move to transport/ package.
-func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
- if len(params.HostPort) == 0 {
- params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
- }
-
- if params.Logger == nil {
- params.Logger = log.StdLogger
- }
-
- if params.MaxPacketSize == 0 {
- params.MaxPacketSize = utils.UDPPacketMaxLength
- }
-
- protocolFactory := thrift.NewTCompactProtocolFactory()
-
- // Each span is first written to thriftBuffer to determine its size in bytes.
- thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
- thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
-
- client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
- if err != nil {
- return nil, err
- }
-
- return &udpSender{
- client: client,
- maxSpanBytes: params.MaxPacketSize - emitBatchOverhead,
- thriftBuffer: thriftBuffer,
- thriftProtocol: thriftProtocol,
- }, nil
-}
-
-// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
-// TODO: (breaking change) move to transport/ package.
-func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
- return NewUDPTransportWithParams(UDPTransportParams{
- AgentClientUDPParams: utils.AgentClientUDPParams{
- HostPort: hostPort,
- MaxPacketSize: maxPacketSize,
- },
- })
-}
-
-// SetReporterStats implements reporterstats.Receiver.
-func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
- s.reporterStats = rs
-}
-
-func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
- s.thriftBuffer.Reset()
- _ = thriftStruct.Write(s.thriftProtocol)
- return s.thriftBuffer.Len()
-}
-
-func (s *udpSender) Append(span *Span) (int, error) {
- if s.process == nil {
- s.process = BuildJaegerProcessThrift(span)
- s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
- s.byteBufferSize += s.processByteSize
- }
- jSpan := BuildJaegerThrift(span)
- spanSize := s.calcSizeOfSerializedThrift(jSpan)
- if spanSize > s.maxSpanBytes {
- s.tooLargeDroppedSpans++
- return 1, errSpanTooLarge
- }
-
- s.byteBufferSize += spanSize
- if s.byteBufferSize <= s.maxSpanBytes {
- s.spanBuffer = append(s.spanBuffer, jSpan)
- if s.byteBufferSize < s.maxSpanBytes {
- return 0, nil
- }
- return s.Flush()
- }
- // the latest span did not fit in the buffer
- n, err := s.Flush()
- s.spanBuffer = append(s.spanBuffer, jSpan)
- s.byteBufferSize = spanSize + s.processByteSize
- return n, err
-}
-
-func (s *udpSender) Flush() (int, error) {
- n := len(s.spanBuffer)
- if n == 0 {
- return 0, nil
- }
- s.batchSeqNo++
- batchSeqNo := int64(s.batchSeqNo)
- err := s.client.EmitBatch(&j.Batch{
- Process: s.process,
- Spans: s.spanBuffer,
- SeqNo: &batchSeqNo,
- Stats: s.makeStats(),
- })
- s.resetBuffers()
- if err != nil {
- s.failedToEmitSpans += int64(n)
- }
- return n, err
-}
-
-func (s *udpSender) Close() error {
- return s.client.Close()
-}
-
-func (s *udpSender) resetBuffers() {
- for i := range s.spanBuffer {
- s.spanBuffer[i] = nil
- }
- s.spanBuffer = s.spanBuffer[:0]
- s.byteBufferSize = s.processByteSize
-}
-
-func (s *udpSender) makeStats() *j.ClientStats {
- var dropped int64
- if s.reporterStats != nil {
- dropped = s.reporterStats.SpansDroppedFromQueue()
- }
- return &j.ClientStats{
- FullQueueDroppedSpans: dropped,
- TooLargeDroppedSpans: s.tooLargeDroppedSpans,
- FailedToEmitSpans: s.failedToEmitSpans,
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
deleted file mode 100644
index 98cab4b6e..000000000
--- a/vendor/github.com/uber/jaeger-client-go/zipkin.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go"
-)
-
-// ZipkinSpanFormat is an OpenTracing carrier format constant
-const ZipkinSpanFormat = "zipkin-span-format"
-
-// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
-// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
-type ExtractableZipkinSpan interface {
- TraceID() uint64
- SpanID() uint64
- ParentID() uint64
- Flags() byte
-}
-
-// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
-// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
-type InjectableZipkinSpan interface {
- SetTraceID(traceID uint64)
- SetSpanID(spanID uint64)
- SetParentID(parentID uint64)
- SetFlags(flags byte)
-}
-
-type zipkinPropagator struct {
- tracer *Tracer
-}
-
-func (p *zipkinPropagator) Inject(
- ctx SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(InjectableZipkinSpan)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
- carrier.SetSpanID(uint64(ctx.SpanID()))
- carrier.SetParentID(uint64(ctx.ParentID()))
- carrier.SetFlags(ctx.samplingState.flags())
- return nil
-}
-
-func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- if carrier.TraceID() == 0 {
- return emptyContext, opentracing.ErrSpanContextNotFound
- }
- var ctx SpanContext
- ctx.traceID.Low = carrier.TraceID()
- ctx.spanID = SpanID(carrier.SpanID())
- ctx.parentID = SpanID(carrier.ParentID())
- ctx.samplingState = &samplingState{}
- ctx.samplingState.setFlags(carrier.Flags())
- return ctx, nil
-}
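The carrier interfaces above only require getters and setters for the trace, span, and parent IDs plus the flags byte, so a carrier can be a plain struct. A hedged sketch of such a type (zipkinCarrier is a hypothetical name; only the method set is dictated by the interfaces themselves, which it satisfies implicitly):

```go
package carrier

// zipkinCarrier is a hypothetical carrier holding the Zipkin trace identity.
type zipkinCarrier struct {
	traceID  uint64
	spanID   uint64
	parentID uint64
	flags    byte
}

// Getters satisfy ExtractableZipkinSpan.
func (c *zipkinCarrier) TraceID() uint64  { return c.traceID }
func (c *zipkinCarrier) SpanID() uint64   { return c.spanID }
func (c *zipkinCarrier) ParentID() uint64 { return c.parentID }
func (c *zipkinCarrier) Flags() byte      { return c.flags }

// Setters satisfy InjectableZipkinSpan.
func (c *zipkinCarrier) SetTraceID(traceID uint64)   { c.traceID = traceID }
func (c *zipkinCarrier) SetSpanID(spanID uint64)     { c.spanID = spanID }
func (c *zipkinCarrier) SetParentID(parentID uint64) { c.parentID = parentID }
func (c *zipkinCarrier) SetFlags(flags byte)         { c.flags = flags }
```

Presumably such a carrier would be handed to the tracer's Inject/Extract calls together with the ZipkinSpanFormat constant, provided the Zipkin propagator is registered on that tracer.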
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
deleted file mode 100644
index 73aeb000f..000000000
--- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "encoding/binary"
- "fmt"
- "time"
-
- "github.com/opentracing/opentracing-go/ext"
-
- "github.com/uber/jaeger-client-go/internal/spanlog"
- z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
- // Zipkin UI does not work well with non-string tag values
- allowPackedNumbers = false
-)
-
-var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
- string(ext.SpanKind): setSpanKind,
- string(ext.PeerHostIPv4): setPeerIPv4,
- string(ext.PeerPort): setPeerPort,
- string(ext.PeerService): setPeerService,
- TracerIPTagKey: removeTag,
-}
-
-// BuildZipkinThrift builds thrift span based on internal span.
-// TODO: (breaking change) move to transport/zipkin and make private.
-func BuildZipkinThrift(s *Span) *z.Span {
- span := &zipkinSpan{Span: s}
- span.handleSpecialTags()
- parentID := int64(span.context.parentID)
- var ptrParentID *int64
- if parentID != 0 {
- ptrParentID = &parentID
- }
- traceIDHigh := int64(span.context.traceID.High)
- var ptrTraceIDHigh *int64
- if traceIDHigh != 0 {
- ptrTraceIDHigh = &traceIDHigh
- }
- timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
- duration := span.duration.Nanoseconds() / int64(time.Microsecond)
- endpoint := &z.Endpoint{
- ServiceName: span.tracer.serviceName,
- Ipv4: int32(span.tracer.hostIPv4)}
- thriftSpan := &z.Span{
- TraceID: int64(span.context.traceID.Low),
- TraceIDHigh: ptrTraceIDHigh,
- ID: int64(span.context.spanID),
- ParentID: ptrParentID,
- Name: span.operationName,
- Timestamp: &timestamp,
- Duration: &duration,
- Debug: span.context.IsDebug(),
- Annotations: buildAnnotations(span, endpoint),
- BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
- return thriftSpan
-}
-
-func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
- // automatically adding 2 Zipkin CoreAnnotations
- annotations := make([]*z.Annotation, 0, 2+len(span.logs))
- var startLabel, endLabel string
- if span.spanKind == string(ext.SpanKindRPCClientEnum) {
- startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
- } else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
- startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
- }
- if !span.startTime.IsZero() && startLabel != "" {
- start := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
- Value: startLabel,
- Host: endpoint}
- annotations = append(annotations, start)
- if span.duration != 0 {
- endTs := span.startTime.Add(span.duration)
- end := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
- Value: endLabel,
- Host: endpoint}
- annotations = append(annotations, end)
- }
- }
- for _, log := range span.logs {
- anno := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
- Host: endpoint}
- if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
- anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
- } else {
- anno.Value = err.Error()
- }
- annotations = append(annotations, anno)
- }
- return annotations
-}
-
-func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
- // automatically adding local component or server/client address tag, and client version
- annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
-
- if span.peerDefined() && span.isRPC() {
- peer := z.Endpoint{
- Ipv4: span.peer.Ipv4,
- Port: span.peer.Port,
- ServiceName: span.peer.ServiceName}
- label := z.CLIENT_ADDR
- if span.isRPCClient() {
- label = z.SERVER_ADDR
- }
- anno := &z.BinaryAnnotation{
- Key: label,
- Value: []byte{1},
- AnnotationType: z.AnnotationType_BOOL,
- Host: &peer}
- annotations = append(annotations, anno)
- }
- if !span.isRPC() {
- componentName := endpoint.ServiceName
- for _, tag := range span.tags {
- if tag.key == string(ext.Component) {
- componentName = stringify(tag.value)
- break
- }
- }
- local := &z.BinaryAnnotation{
- Key: z.LOCAL_COMPONENT,
- Value: []byte(componentName),
- AnnotationType: z.AnnotationType_STRING,
- Host: endpoint}
- annotations = append(annotations, local)
- }
- for _, tag := range span.tags {
- // "Special tags" are already handled by this point, we'd be double reporting the
- // tags if we don't skip here
- if _, ok := specialTagHandlers[tag.key]; ok {
- continue
- }
- if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
- annotations = append(annotations, anno)
- }
- }
- return annotations
-}
-
-func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
- bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
- if value, ok := val.(string); ok {
- bann.Value = []byte(truncateString(value, maxTagValueLength))
- bann.AnnotationType = z.AnnotationType_STRING
- } else if value, ok := val.([]byte); ok {
- if len(value) > maxTagValueLength {
- value = value[:maxTagValueLength]
- }
- bann.Value = value
- bann.AnnotationType = z.AnnotationType_BYTES
- } else if value, ok := val.(int32); ok && allowPackedNumbers {
- bann.Value = int32ToBytes(value)
- bann.AnnotationType = z.AnnotationType_I32
- } else if value, ok := val.(int64); ok && allowPackedNumbers {
- bann.Value = int64ToBytes(value)
- bann.AnnotationType = z.AnnotationType_I64
- } else if value, ok := val.(int); ok && allowPackedNumbers {
- bann.Value = int64ToBytes(int64(value))
- bann.AnnotationType = z.AnnotationType_I64
- } else if value, ok := val.(bool); ok {
- bann.Value = []byte{boolToByte(value)}
- bann.AnnotationType = z.AnnotationType_BOOL
- } else {
- value := stringify(val)
- bann.Value = []byte(truncateString(value, maxTagValueLength))
- bann.AnnotationType = z.AnnotationType_STRING
- }
- return bann
-}
-
-func stringify(value interface{}) string {
- if s, ok := value.(string); ok {
- return s
- }
- return fmt.Sprintf("%+v", value)
-}
-
-func truncateString(value string, maxLength int) string {
- // we ignore the problem of utf8 runes possibly being sliced in the middle,
- // as it is rather expensive to iterate through each tag just to find rune
- // boundaries.
- if len(value) > maxLength {
- return value[:maxLength]
- }
- return value
-}
-
-func boolToByte(b bool) byte {
- if b {
- return 1
- }
- return 0
-}
-
-// int32ToBytes converts int32 to bytes.
-func int32ToBytes(i int32) []byte {
- buf := make([]byte, 4)
- binary.BigEndian.PutUint32(buf, uint32(i))
- return buf
-}
-
-// int64ToBytes converts int64 to bytes.
-func int64ToBytes(i int64) []byte {
- buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, uint64(i))
- return buf
-}
-
-type zipkinSpan struct {
- *Span
-
- // peer points to the peer service participating in this span,
- // e.g. the Client if this span is a server span,
- // or Server if this span is a client span
- peer struct {
- Ipv4 int32
- Port int16
- ServiceName string
- }
-
- // used to distinguish local vs. RPC Server vs. RPC Client spans
- spanKind string
-}
-
-func (s *zipkinSpan) handleSpecialTags() {
- s.Lock()
- defer s.Unlock()
- if s.firstInProcess {
- // append the process tags
- s.tags = append(s.tags, s.tracer.tags...)
- }
- filteredTags := make([]Tag, 0, len(s.tags))
- for _, tag := range s.tags {
- if handler, ok := specialTagHandlers[tag.key]; ok {
- handler(s, tag.value)
- } else {
- filteredTags = append(filteredTags, tag)
- }
- }
- s.tags = filteredTags
-}
-
-func setSpanKind(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- s.spanKind = val
- return
- }
- if val, ok := value.(ext.SpanKindEnum); ok {
- s.spanKind = string(val)
- }
-}
-
-func setPeerIPv4(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- if ip, err := utils.ParseIPToUint32(val); err == nil {
- s.peer.Ipv4 = int32(ip)
- return
- }
- }
- if val, ok := value.(uint32); ok {
- s.peer.Ipv4 = int32(val)
- return
- }
- if val, ok := value.(int32); ok {
- s.peer.Ipv4 = val
- }
-}
-
-func setPeerPort(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- if port, err := utils.ParsePort(val); err == nil {
- s.peer.Port = int16(port)
- return
- }
- }
- if val, ok := value.(uint16); ok {
- s.peer.Port = int16(val)
- return
- }
- if val, ok := value.(int); ok {
- s.peer.Port = int16(val)
- }
-}
-
-func setPeerService(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- s.peer.ServiceName = val
- }
-}
-
-func removeTag(s *zipkinSpan, value interface{}) {}
-
-func (s *zipkinSpan) peerDefined() bool {
- return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
-}
-
-func (s *zipkinSpan) isRPC() bool {
- s.RLock()
- defer s.RUnlock()
- return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
-}
-
-func (s *zipkinSpan) isRPCClient() bool {
- s.RLock()
- defer s.RUnlock()
- return s.spanKind == string(ext.SpanKindRPCClientEnum)
-}
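handleSpecialTags above drives a small dispatch table: tags with a registered handler mutate the span representation and are dropped, everything else is kept. A simplified standalone sketch of that pattern, with hypothetical stand-in types rather than the jaeger-client-go ones:

```go
package main

import "fmt"

type tag struct {
	key   string
	value interface{}
}

type spanRecord struct {
	spanKind string
	tags     []tag
}

// specialHandlers consumes recognized tags instead of reporting them verbatim.
var specialHandlers = map[string]func(*spanRecord, interface{}){
	"span.kind": func(r *spanRecord, v interface{}) {
		if s, ok := v.(string); ok {
			r.spanKind = s
		}
	},
}

func (r *spanRecord) filterSpecialTags() {
	kept := r.tags[:0]
	for _, t := range r.tags {
		if h, ok := specialHandlers[t.key]; ok {
			h(r, t.value) // handled: do not keep the tag
			continue
		}
		kept = append(kept, t)
	}
	r.tags = kept
}

func main() {
	r := &spanRecord{tags: []tag{{"span.kind", "client"}, {"http.status_code", 200}}}
	r.filterSpecialTags()
	fmt.Println(r.spanKind, len(r.tags)) // client 1
}
```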
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/github.com/uber/jaeger-lib/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
deleted file mode 100644
index 2a6a43efd..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/counter.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Counter tracks the number of times an event has occurred
-type Counter interface {
- // Inc adds the given value to the counter.
- Inc(int64)
-}
-
-// NullCounter counter that does nothing
-var NullCounter Counter = nullCounter{}
-
-type nullCounter struct{}
-
-func (nullCounter) Inc(int64) {}
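Counter is a single-method interface, so in-memory or test implementations stay tiny. A hedged sketch (inMemCounter is a hypothetical name and not part of jaeger-lib; only Inc(int64) comes from the interface above):

```go
package metricsx

import "sync/atomic"

// inMemCounter is a hypothetical in-memory Counter implementation, e.g. for tests.
type inMemCounter struct {
	total int64
}

// Inc adds the given value to the counter.
func (c *inMemCounter) Inc(delta int64) {
	atomic.AddInt64(&c.total, delta)
}

// Value returns the current total (not part of the Counter interface).
func (c *inMemCounter) Value() int64 {
	return atomic.LoadInt64(&c.total)
}
```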
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
deleted file mode 100644
index 0ead061eb..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/factory.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// NSOptions defines the name and tags map associated with a factory namespace
-type NSOptions struct {
- Name string
- Tags map[string]string
-}
-
-// Options defines the information associated with a metric
-type Options struct {
- Name string
- Tags map[string]string
- Help string
-}
-
-// TimerOptions defines the information associated with a metric
-type TimerOptions struct {
- Name string
- Tags map[string]string
- Help string
- Buckets []time.Duration
-}
-
-// HistogramOptions defines the information associated with a metric
-type HistogramOptions struct {
- Name string
- Tags map[string]string
- Help string
- Buckets []float64
-}
-
-// Factory creates new metrics
-type Factory interface {
- Counter(metric Options) Counter
- Timer(metric TimerOptions) Timer
- Gauge(metric Options) Gauge
- Histogram(metric HistogramOptions) Histogram
-
- // Namespace returns a nested metrics factory.
- Namespace(scope NSOptions) Factory
-}
-
-// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
-var NullFactory Factory = nullFactory{}
-
-type nullFactory struct{}
-
-func (nullFactory) Counter(options Options) Counter {
- return NullCounter
-}
-func (nullFactory) Timer(options TimerOptions) Timer {
- return NullTimer
-}
-func (nullFactory) Gauge(options Options) Gauge {
- return NullGauge
-}
-func (nullFactory) Histogram(options HistogramOptions) Histogram {
- return NullHistogram
-}
-func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
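Code that consumes this Factory typically accepts nil to disable reporting and namespaces its own metrics. A hedged sketch of that pattern, using the github.com/uber/jaeger-lib/metrics import path being dropped from vendor here; reporterMetrics and newReporterMetrics are hypothetical names:

```go
package reporter

import "github.com/uber/jaeger-lib/metrics"

// reporterMetrics groups the metrics one component owns.
type reporterMetrics struct {
	batches metrics.Counter
	queue   metrics.Gauge
}

func newReporterMetrics(f metrics.Factory) *reporterMetrics {
	if f == nil {
		f = metrics.NullFactory // nil factory means reporting is disabled
	}
	f = f.Namespace(metrics.NSOptions{Name: "reporter"})
	return &reporterMetrics{
		batches: f.Counter(metrics.Options{Name: "batches"}),
		queue:   f.Gauge(metrics.Options{Name: "queue_length"}),
	}
}
```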
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
deleted file mode 100644
index 3c606391a..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Gauge returns instantaneous measurements of something as an int64 value
-type Gauge interface {
- // Update the gauge to the value passed in.
- Update(int64)
-}
-
-// NullGauge gauge that does nothing
-var NullGauge Gauge = nullGauge{}
-
-type nullGauge struct{}
-
-func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
deleted file mode 100644
index d3bd6174f..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Histogram that keeps track of a distribution of values.
-type Histogram interface {
- // Records the value passed in.
- Record(float64)
-}
-
-// NullHistogram that does nothing
-var NullHistogram Histogram = nullHistogram{}
-
-type nullHistogram struct{}
-
-func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
deleted file mode 100644
index c24445a10..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/keys.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "sort"
-)
-
-// GetKey converts name+tags into a single string of the form
-// "name|tag1=value1|...|tagN=valueN", where tag names are
-// sorted alphabetically.
-func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
- keys := make([]string, 0, len(tags))
- for k := range tags {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- key := name
- for _, k := range keys {
- key = key + tagsSep + k + tagKVSep + tags[k]
- }
- return key
-}
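A short usage sketch of GetKey; because the tag names are sorted alphabetically above, the resulting key is deterministic regardless of map iteration order:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey(
		"requests",
		map[string]string{"status": "ok", "endpoint": "/ping"},
		"|", "=",
	)
	fmt.Println(key) // requests|endpoint=/ping|status=ok
}
```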
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
deleted file mode 100644
index 0df0c662e..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-// MustInit initializes the passed-in metrics struct, assigning its fields using the passed-in factory.
-//
-// It uses reflection to initialize a struct containing metrics fields
-// by assigning new Counter/Gauge/Timer values with the metric name retrieved
-// from the `metric` tag and stats tags retrieved from the `tags` tag.
-//
-// Note: all fields of the struct must be exported, have a `metric` tag, and be
-// of type Counter or Gauge or Timer.
-//
-// Errors during Init lead to a panic.
-func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
- if err := Init(metrics, factory, globalTags); err != nil {
- panic(err.Error())
- }
-}
-
-// Init does the same as MustInit, but returns an error instead of
-// panicking.
-func Init(m interface{}, factory Factory, globalTags map[string]string) error {
- // Allow user to opt out of reporting metrics by passing in nil.
- if factory == nil {
- factory = NullFactory
- }
-
- counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
- gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
- timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
- histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
-
- v := reflect.ValueOf(m).Elem()
- t := v.Type()
- for i := 0; i < t.NumField(); i++ {
- tags := make(map[string]string)
- for k, v := range globalTags {
- tags[k] = v
- }
- var buckets []float64
- field := t.Field(i)
- metric := field.Tag.Get("metric")
- if metric == "" {
- return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
- }
- if tagString := field.Tag.Get("tags"); tagString != "" {
- tagPairs := strings.Split(tagString, ",")
- for _, tagPair := range tagPairs {
- tag := strings.Split(tagPair, "=")
- if len(tag) != 2 {
- return fmt.Errorf(
- "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
- field.Name, tagPair, tagString)
- }
- tags[tag[0]] = tag[1]
- }
- }
- if bucketString := field.Tag.Get("buckets"); bucketString != "" {
- if field.Type.AssignableTo(timerPtrType) {
- // TODO: Parse timer duration buckets
- return fmt.Errorf(
- "Field [%s]: Buckets are not currently initialized for timer metrics",
- field.Name)
- } else if field.Type.AssignableTo(histogramPtrType) {
- bucketValues := strings.Split(bucketString, ",")
- for _, bucket := range bucketValues {
- b, err := strconv.ParseFloat(bucket, 64)
- if err != nil {
- return fmt.Errorf(
- "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
- field.Name, bucket, bucketString)
- }
- buckets = append(buckets, b)
- }
- } else {
- return fmt.Errorf(
- "Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
- field.Name)
- }
- }
- help := field.Tag.Get("help")
- var obj interface{}
- if field.Type.AssignableTo(counterPtrType) {
- obj = factory.Counter(Options{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(gaugePtrType) {
- obj = factory.Gauge(Options{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(timerPtrType) {
- // TODO: Add buckets once parsed (see TODO above)
- obj = factory.Timer(TimerOptions{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(histogramPtrType) {
- obj = factory.Histogram(HistogramOptions{
- Name: metric,
- Tags: tags,
- Help: help,
- Buckets: buckets,
- })
- } else {
- return fmt.Errorf(
- "Field %s is not a pointer to timer, gauge, or counter",
- field.Name)
- }
- v.Field(i).Set(reflect.ValueOf(obj))
- }
- return nil
-}
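Init/MustInit above wire up a metrics struct purely from reflection over the `metric`, `tags`, `help`, and `buckets` field tags. A hedged usage sketch (queueMetrics is a hypothetical struct; NullFactory is used only so the example has no backend dependency):

```go
package main

import (
	"github.com/uber/jaeger-lib/metrics"
)

// queueMetrics: every field is exported, carries a `metric` tag, and has one
// of the supported interface types, as Init requires.
type queueMetrics struct {
	Enqueued  metrics.Counter `metric:"enqueued" tags:"queue=spans"`
	QueueSize metrics.Gauge   `metric:"queue_size" help:"current queue length"`
	Latency   metrics.Timer   `metric:"latency"`
}

func main() {
	var m queueMetrics
	// A real backend factory would be passed instead of NullFactory.
	metrics.MustInit(&m, metrics.NullFactory, map[string]string{"host": "example"})
	m.Enqueued.Inc(1)
	m.QueueSize.Update(42)
}
```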
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
deleted file mode 100644
index 4a8abdb53..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// StartStopwatch begins recording the execution time of an event, returning
-// a Stopwatch that should be used to stop recording the time for
-// that event. Multiple events can be occurring simultaneously, each
-// represented by a different active Stopwatch.
-func StartStopwatch(timer Timer) Stopwatch {
- return Stopwatch{t: timer, start: time.Now()}
-}
-
-// A Stopwatch tracks the execution time of a specific event
-type Stopwatch struct {
- t Timer
- start time.Time
-}
-
-// Stop stops the stopwatch and records the amount of elapsed time.
-func (s Stopwatch) Stop() {
- s.t.Record(s.ElapsedTime())
-}
-
-// ElapsedTime returns the amount of elapsed time (in time.Duration)
-func (s Stopwatch) ElapsedTime() time.Duration {
- return time.Since(s.start)
-}
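A minimal usage sketch of StartStopwatch/Stop; NullTimer stands in for a real Timer obtained from a Factory:

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// NullTimer keeps the example dependency-free; a real Timer would be
	// supplied by a metrics Factory.
	sw := metrics.StartStopwatch(metrics.NullTimer)
	time.Sleep(10 * time.Millisecond) // the work being measured
	sw.Stop()                         // records the elapsed duration into the timer
}
```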
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
deleted file mode 100644
index e18d222ab..000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/timer.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// Timer accumulates observations about how long some operation took,
-// and also maintains a histogram of percentiles.
-type Timer interface {
- // Records the time passed in.
- Record(time.Duration)
-}
-
-// NullTimer timer that does nothing
-var NullTimer Timer = nullTimer{}
-
-type nullTimer struct{}
-
-func (nullTimer) Record(time.Duration) {}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
deleted file mode 100644
index 571116cc3..000000000
--- a/vendor/go.uber.org/atomic/.codecov.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 100 # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-
-# Also update COVER_IGNORE_PKGS in the Makefile.
-ignore:
- - /internal/gen-atomicint/
- - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
deleted file mode 100644
index c3fa25389..000000000
--- a/vendor/go.uber.org/atomic/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-/bin
-.DS_Store
-/vendor
-cover.html
-cover.out
-lint.log
-
-# Binaries
-*.test
-
-# Profiling output
-*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
deleted file mode 100644
index 13d0a4f25..000000000
--- a/vendor/go.uber.org/atomic/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/atomic
-
-env:
- global:
- - GO111MODULE=on
-
-matrix:
- include:
- - go: oldstable
- - go: stable
- env: LINT=1
-
-cache:
- directories:
- - vendor
-
-before_install:
- - go version
-
-script:
- - test -z "$LINT" || make lint
- - make cover
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
deleted file mode 100644
index 24c0274dc..000000000
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [1.7.0] - 2020-09-14
-### Added
-- Support JSON serialization and deserialization of primitive atomic types.
-- Support Text marshalling and unmarshalling for string atomics.
-
-### Changed
-- Disallow incorrect comparison of atomic values in a non-atomic way.
-
-### Removed
-- Remove dependency on `golang.org/x/{lint, tools}`.
-
-## [1.6.0] - 2020-02-24
-### Changed
-- Drop library dependency on `golang.org/x/{lint, tools}`.
-
-## [1.5.1] - 2019-11-19
-- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
-  causing `CAS` to fail even though the old value matches.
-
-## [1.5.0] - 2019-10-29
-### Changed
-- With Go modules, only the `go.uber.org/atomic` import path is supported now.
- If you need to use the old import path, please add a `replace` directive to
- your `go.mod`.
-
-## [1.4.0] - 2019-05-01
-### Added
- - Add `atomic.Error` type for atomic operations on `error` values.
-
-## [1.3.2] - 2018-05-02
-### Added
-- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
-
-## [1.3.1] - 2017-11-14
-### Fixed
-- Revert optimization for `atomic.String.Store("")` which caused data races.
-
-## [1.3.0] - 2017-11-13
-### Added
-- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
-
-### Changed
-- Optimize `atomic.String.Store("")` by avoiding an allocation.
-
-## [1.2.0] - 2017-04-12
-### Added
-- Shadow `atomic.Value` from `sync/atomic`.
-
-## [1.1.0] - 2017-03-10
-### Added
-- Add atomic `Float64` type.
-
-### Changed
-- Support new `go.uber.org/atomic` import path.
-
-## [1.0.0] - 2016-07-18
-
-- Initial release.
-
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
-[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
deleted file mode 100644
index 8765c9fbc..000000000
--- a/vendor/go.uber.org/atomic/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
deleted file mode 100644
index 1b1376d42..000000000
--- a/vendor/go.uber.org/atomic/Makefile
+++ /dev/null
@@ -1,78 +0,0 @@
-# Directory to place `go install`ed binaries into.
-export GOBIN ?= $(shell pwd)/bin
-
-GOLINT = $(GOBIN)/golint
-GEN_ATOMICINT = $(GOBIN)/gen-atomicint
-GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
-STATICCHECK = $(GOBIN)/staticcheck
-
-GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
-
-# Also update ignore section in .codecov.yml.
-COVER_IGNORE_PKGS = \
- go.uber.org/atomic/internal/gen-atomicint \
- go.uber.org/atomic/internal/gen-atomicwrapper
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: gofmt
-gofmt:
- $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
- gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
- @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
- go build -o $@ ./internal/gen-atomicwrapper
-
-$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
- go build -o $@ ./internal/gen-atomicint
-
-.PHONY: golint
-golint: $(GOLINT)
- $(GOLINT) ./...
-
-.PHONY: staticcheck
-staticcheck: $(STATICCHECK)
- $(STATICCHECK) ./...
-
-.PHONY: lint
-lint: gofmt golint staticcheck generatenodirty
-
-# comma separated list of packages to consider for code coverage.
-COVER_PKG = $(shell \
- go list -find ./... | \
- grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
- paste -sd, -)
-
-.PHONY: cover
-cover:
- go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: generate
-generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
- go generate ./...
-
-.PHONY: generatenodirty
-generatenodirty:
- @[ -z "$$(git status --porcelain)" ] || ( \
- echo "Working tree is dirty. Commit your changes first."; \
- exit 1 )
- @make generate
- @status=$$(git status --porcelain); \
- [ -z "$$status" ] || ( \
- echo "Working tree is dirty after `make generate`:"; \
- echo "$$status"; \
- echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
deleted file mode 100644
index ade0c20f1..000000000
--- a/vendor/go.uber.org/atomic/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
-
-Simple wrappers for primitive types to enforce atomic access.
-
-## Installation
-
-```shell
-$ go get -u go.uber.org/atomic@v1
-```
-
-### Legacy Import Path
-
-As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
-of using this package. If you are using Go modules, this package will fail to
-compile with the legacy import path `github.com/uber-go/atomic`.
-
-We recommend migrating your code to the new import path but if you're unable
-to do so, or if your dependencies are still using the old import path, you
-will have to add a `replace` directive to your `go.mod` file downgrading the
-legacy import path to an older version.
-
-```
-replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
-```
-
-You can do so automatically by running the following command.
-
-```shell
-$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
-```
-
-## Usage
-
-The standard library's `sync/atomic` is powerful, but it's easy to forget which
-variables must be accessed atomically. `go.uber.org/atomic` preserves all the
-functionality of the standard library, but wraps the primitive types to
-provide a safer, more convenient API.
-
-```go
-var atom atomic.Uint32
-atom.Store(42)
-atom.Sub(2)
-atom.CAS(40, 11)
-```
-
-See the [documentation][doc] for a complete API specification.
-
-## Development Status
-
-Stable.
-
----
-
-Released under the [MIT License](LICENSE.txt).
-
-[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
-[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/atomic
-[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/atomic
-[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
-[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
deleted file mode 100644
index 9cf1914b1..000000000
--- a/vendor/go.uber.org/atomic/bool.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
-)
-
-// Bool is an atomic type-safe wrapper for bool values.
-type Bool struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroBool bool
-
-// NewBool creates a new Bool.
-func NewBool(v bool) *Bool {
- x := &Bool{}
- if v != _zeroBool {
- x.Store(v)
- }
- return x
-}
-
-// Load atomically loads the wrapped bool.
-func (x *Bool) Load() bool {
- return truthy(x.v.Load())
-}
-
-// Store atomically stores the passed bool.
-func (x *Bool) Store(v bool) {
- x.v.Store(boolToInt(v))
-}
-
-// CAS is an atomic compare-and-swap for bool values.
-func (x *Bool) CAS(o, n bool) bool {
- return x.v.CAS(boolToInt(o), boolToInt(n))
-}
-
-// Swap atomically stores the given bool and returns the old
-// value.
-func (x *Bool) Swap(o bool) bool {
- return truthy(x.v.Swap(boolToInt(o)))
-}
-
-// MarshalJSON encodes the wrapped bool into JSON.
-func (x *Bool) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a bool from JSON.
-func (x *Bool) UnmarshalJSON(b []byte) error {
- var v bool
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
deleted file mode 100644
index c7bf7a827..000000000
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
-
-func truthy(n uint32) bool {
- return n == 1
-}
-
-func boolToInt(b bool) uint32 {
- if b {
- return 1
- }
- return 0
-}
-
-// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() bool {
- for {
- old := b.Load()
- if b.CAS(old, !old) {
- return old
- }
- }
-}
-
-// String encodes the wrapped value as a string.
-func (b *Bool) String() string {
- return strconv.FormatBool(b.Load())
-}
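A brief usage sketch of the Bool wrapper, combining the generated CAS with the hand-written Toggle above; it assumes the go.uber.org/atomic module is available outside this vendor tree:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	ready := atomic.NewBool(false)

	prev := ready.Toggle()          // flips false -> true, returns the old value
	fmt.Println(prev, ready.Load()) // false true

	swapped := ready.CAS(true, false)    // succeeds: the current value is true
	fmt.Println(swapped, ready.String()) // true false
}
```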
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
deleted file mode 100644
index ae7390ee6..000000000
--- a/vendor/go.uber.org/atomic/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package atomic provides simple wrappers around numerics to enforce atomic
-// access.
-package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
deleted file mode 100644
index 027cfcb20..000000000
--- a/vendor/go.uber.org/atomic/duration.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "time"
-)
-
-// Duration is an atomic type-safe wrapper for time.Duration values.
-type Duration struct {
- _ nocmp // disallow non-atomic comparison
-
- v Int64
-}
-
-var _zeroDuration time.Duration
-
-// NewDuration creates a new Duration.
-func NewDuration(v time.Duration) *Duration {
- x := &Duration{}
- if v != _zeroDuration {
- x.Store(v)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Duration.
-func (x *Duration) Load() time.Duration {
- return time.Duration(x.v.Load())
-}
-
-// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(v time.Duration) {
- x.v.Store(int64(v))
-}
-
-// CAS is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CAS(o, n time.Duration) bool {
- return x.v.CAS(int64(o), int64(n))
-}
-
-// Swap atomically stores the given time.Duration and returns the old
-// value.
-func (x *Duration) Swap(o time.Duration) time.Duration {
- return time.Duration(x.v.Swap(int64(o)))
-}
-
-// MarshalJSON encodes the wrapped time.Duration into JSON.
-func (x *Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a time.Duration from JSON.
-func (x *Duration) UnmarshalJSON(b []byte) error {
- var v time.Duration
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
deleted file mode 100644
index 6273b66bd..000000000
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
-
-// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(n time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(n)))
-}
-
-// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(n time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(n)))
-}
-
-// String encodes the wrapped value as a string.
-func (d *Duration) String() string {
- return d.Load().String()
-}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
deleted file mode 100644
index a6166fbea..000000000
--- a/vendor/go.uber.org/atomic/error.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// Error is an atomic type-safe wrapper for error values.
-type Error struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroError error
-
-// NewError creates a new Error.
-func NewError(v error) *Error {
- x := &Error{}
- if v != _zeroError {
- x.Store(v)
- }
- return x
-}
-
-// Load atomically loads the wrapped error.
-func (x *Error) Load() error {
- return unpackError(x.v.Load())
-}
-
-// Store atomically stores the passed error.
-func (x *Error) Store(v error) {
- x.v.Store(packError(v))
-}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
deleted file mode 100644
index ffe0be21c..000000000
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// atomic.Value panics on nil inputs, or if the underlying type changes.
-// Stabilize by always storing a custom struct that we control.
-
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
-
-type packedError struct{ Value error }
-
-func packError(v error) interface{} {
- return packedError{v}
-}
-
-func unpackError(v interface{}) error {
- if err, ok := v.(packedError); ok {
- return err.Value
- }
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
deleted file mode 100644
index 071906020..000000000
--- a/vendor/go.uber.org/atomic/float64.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float64 is an atomic type-safe wrapper for float64 values.
-type Float64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint64
-}
-
-var _zeroFloat64 float64
-
-// NewFloat64 creates a new Float64.
-func NewFloat64(v float64) *Float64 {
- x := &Float64{}
- if v != _zeroFloat64 {
- x.Store(v)
- }
- return x
-}
-
-// Load atomically loads the wrapped float64.
-func (x *Float64) Load() float64 {
- return math.Float64frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float64.
-func (x *Float64) Store(v float64) {
- x.v.Store(math.Float64bits(v))
-}
-
-// CAS is an atomic compare-and-swap for float64 values.
-func (x *Float64) CAS(o, n float64) bool {
- return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
-}
-
-// MarshalJSON encodes the wrapped float64 into JSON.
-func (x *Float64) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float64 from JSON.
-func (x *Float64) UnmarshalJSON(b []byte) error {
- var v float64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
deleted file mode 100644
index 927b1add7..000000000
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "strconv"
-
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
-
-// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(s float64) float64 {
- for {
- old := f.Load()
- new := old + s
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(s float64) float64 {
- return f.Add(-s)
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float64) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(f.Load(), 'g', -1, 64)
-}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
deleted file mode 100644
index 50d6b2485..000000000
--- a/vendor/go.uber.org/atomic/gen.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
-//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
-//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
-//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod
deleted file mode 100644
index daa7599fe..000000000
--- a/vendor/go.uber.org/atomic/go.mod
+++ /dev/null
@@ -1,8 +0,0 @@
-module go.uber.org/atomic
-
-require (
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/stretchr/testify v1.3.0
-)
-
-go 1.13
diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum
deleted file mode 100644
index 4f76e62c1..000000000
--- a/vendor/go.uber.org/atomic/go.sum
+++ /dev/null
@@ -1,9 +0,0 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
deleted file mode 100644
index 18ae56493..000000000
--- a/vendor/go.uber.org/atomic/int32.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int32 is an atomic wrapper around int32.
-type Int32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int32
-}
-
-// NewInt32 creates a new Int32.
-func NewInt32(i int32) *Int32 {
- return &Int32{v: i}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int32) Load() int32 {
- return atomic.LoadInt32(&i.v)
-}
-
-// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(n int32) int32 {
- return atomic.AddInt32(&i.v, n)
-}
-
-// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(n int32) int32 {
- return atomic.AddInt32(&i.v, -n)
-}
-
-// Inc atomically increments the wrapped int32 and returns the new value.
-func (i *Int32) Inc() int32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int32 and returns the new value.
-func (i *Int32) Dec() int32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Int32) CAS(old, new int32) bool {
- return atomic.CompareAndSwapInt32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int32) Store(n int32) {
- atomic.StoreInt32(&i.v, n)
-}
-
-// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(n int32) int32 {
- return atomic.SwapInt32(&i.v, n)
-}
-
-// MarshalJSON encodes the wrapped int32 into JSON.
-func (i *Int32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int32.
-func (i *Int32) UnmarshalJSON(b []byte) error {
- var v int32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int32) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
deleted file mode 100644
index 2bcbbfaa9..000000000
--- a/vendor/go.uber.org/atomic/int64.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int64 is an atomic wrapper around int64.
-type Int64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int64
-}
-
-// NewInt64 creates a new Int64.
-func NewInt64(i int64) *Int64 {
- return &Int64{v: i}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int64) Load() int64 {
- return atomic.LoadInt64(&i.v)
-}
-
-// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(n int64) int64 {
- return atomic.AddInt64(&i.v, n)
-}
-
-// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(n int64) int64 {
- return atomic.AddInt64(&i.v, -n)
-}
-
-// Inc atomically increments the wrapped int64 and returns the new value.
-func (i *Int64) Inc() int64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int64 and returns the new value.
-func (i *Int64) Dec() int64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Int64) CAS(old, new int64) bool {
- return atomic.CompareAndSwapInt64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int64) Store(n int64) {
- atomic.StoreInt64(&i.v, n)
-}
-
-// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(n int64) int64 {
- return atomic.SwapInt64(&i.v, n)
-}
-
-// MarshalJSON encodes the wrapped int64 into JSON.
-func (i *Int64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int64.
-func (i *Int64) UnmarshalJSON(b []byte) error {
- var v int64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int64) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
deleted file mode 100644
index a8201cb4a..000000000
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// nocmp is an uncomparable struct. Embed this inside another struct to make
-// it uncomparable.
-//
-// type Foo struct {
-// nocmp
-// // ...
-// }
-//
-// This DOES NOT:
-//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
-type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
deleted file mode 100644
index 225b7a2be..000000000
--- a/vendor/go.uber.org/atomic/string.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// String is an atomic type-safe wrapper for string values.
-type String struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroString string
-
-// NewString creates a new String.
-func NewString(v string) *String {
- x := &String{}
- if v != _zeroString {
- x.Store(v)
- }
- return x
-}
-
-// Load atomically loads the wrapped string.
-func (x *String) Load() string {
- if v := x.v.Load(); v != nil {
- return v.(string)
- }
- return _zeroString
-}
-
-// Store atomically stores the passed string.
-func (x *String) Store(v string) {
- x.v.Store(v)
-}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
deleted file mode 100644
index 3a9558213..000000000
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
-
-// String returns the wrapped value.
-func (s *String) String() string {
- return s.Load()
-}
-
-// MarshalText encodes the wrapped string into a textual form.
-//
-// This makes it encodable as JSON, YAML, XML, and more.
-func (s *String) MarshalText() ([]byte, error) {
- return []byte(s.Load()), nil
-}
-
-// UnmarshalText decodes text and replaces the wrapped string with it.
-//
-// This makes it decodable from JSON, YAML, XML, and more.
-func (s *String) UnmarshalText(b []byte) error {
- s.Store(string(b))
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
deleted file mode 100644
index a973aba1a..000000000
--- a/vendor/go.uber.org/atomic/uint32.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint32 is an atomic wrapper around uint32.
-type Uint32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint32
-}
-
-// NewUint32 creates a new Uint32.
-func NewUint32(i uint32) *Uint32 {
- return &Uint32{v: i}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint32) Load() uint32 {
- return atomic.LoadUint32(&i.v)
-}
-
-// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(n uint32) uint32 {
- return atomic.AddUint32(&i.v, n)
-}
-
-// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(n uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(n - 1))
-}
-
-// Inc atomically increments the wrapped uint32 and returns the new value.
-func (i *Uint32) Inc() uint32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint32 and returns the new value.
-func (i *Uint32) Dec() uint32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Uint32) CAS(old, new uint32) bool {
- return atomic.CompareAndSwapUint32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint32) Store(n uint32) {
- atomic.StoreUint32(&i.v, n)
-}
-
-// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(n uint32) uint32 {
- return atomic.SwapUint32(&i.v, n)
-}
-
-// MarshalJSON encodes the wrapped uint32 into JSON.
-func (i *Uint32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint32.
-func (i *Uint32) UnmarshalJSON(b []byte) error {
- var v uint32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint32) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
deleted file mode 100644
index 3b6c71fd5..000000000
--- a/vendor/go.uber.org/atomic/uint64.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint64 is an atomic wrapper around uint64.
-type Uint64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint64
-}
-
-// NewUint64 creates a new Uint64.
-func NewUint64(i uint64) *Uint64 {
- return &Uint64{v: i}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint64) Load() uint64 {
- return atomic.LoadUint64(&i.v)
-}
-
-// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(n uint64) uint64 {
- return atomic.AddUint64(&i.v, n)
-}
-
-// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(n uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(n - 1))
-}
-
-// Inc atomically increments the wrapped uint64 and returns the new value.
-func (i *Uint64) Inc() uint64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint64 and returns the new value.
-func (i *Uint64) Dec() uint64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Uint64) CAS(old, new uint64) bool {
- return atomic.CompareAndSwapUint64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint64) Store(n uint64) {
- atomic.StoreUint64(&i.v, n)
-}
-
-// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(n uint64) uint64 {
- return atomic.SwapUint64(&i.v, n)
-}
-
-// MarshalJSON encodes the wrapped uint64 into JSON.
-func (i *Uint64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint64.
-func (i *Uint64) UnmarshalJSON(b []byte) error {
- var v uint64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint64) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
deleted file mode 100644
index 671f3a382..000000000
--- a/vendor/go.uber.org/atomic/value.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "sync/atomic"
-
-// Value shadows the type of the same name from sync/atomic
-// https://godoc.org/sync/atomic#Value
-type Value struct {
- atomic.Value
-
- _ nocmp // disallow non-atomic comparison
-}
diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/k8s.io/client-go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go
deleted file mode 100644
index 816db57f5..000000000
--- a/vendor/k8s.io/client-go/util/homedir/homedir.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package homedir
-
-import (
- "os"
- "runtime"
-)
-
-// HomeDir returns the home directory for the current user
-func HomeDir() string {
- if runtime.GOOS == "windows" {
-
- // First prefer the HOME environmental variable
- if home := os.Getenv("HOME"); len(home) > 0 {
- if _, err := os.Stat(home); err == nil {
- return home
- }
- }
- if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
- homeDir := homeDrive + homePath
- if _, err := os.Stat(homeDir); err == nil {
- return homeDir
- }
- }
- if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
- if _, err := os.Stat(userProfile); err == nil {
- return userProfile
- }
- }
- }
- return os.Getenv("HOME")
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9d0d9b996..e5534675f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -40,6 +40,8 @@ github.com/beorn7/perks/quantile
github.com/blang/semver
# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/buger/goterm
+# github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
+github.com/checkpoint-restore/checkpointctl/lib
# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/checkpoint-restore/go-criu
github.com/checkpoint-restore/go-criu/rpc
@@ -63,14 +65,14 @@ github.com/containernetworking/cni/pkg/types/020
github.com/containernetworking/cni/pkg/types/current
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v0.9.0
+# github.com/containernetworking/plugins v0.9.1
github.com/containernetworking/plugins/pkg/ip
github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.19.6
+# github.com/containers/buildah v1.19.8
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -224,18 +226,18 @@ github.com/containers/storage/pkg/system
github.com/containers/storage/pkg/tarlog
github.com/containers/storage/pkg/truncindex
github.com/containers/storage/pkg/unshare
-# github.com/coreos/go-iptables v0.4.5
+# github.com/coreos/go-iptables v0.5.0
github.com/coreos/go-iptables/iptables
# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
github.com/coreos/go-systemd/activation
-# github.com/coreos/go-systemd/v22 v22.1.0
+# github.com/coreos/go-systemd/v22 v22.2.0
github.com/coreos/go-systemd/v22/activation
github.com/coreos/go-systemd/v22/daemon
github.com/coreos/go-systemd/v22/dbus
github.com/coreos/go-systemd/v22/internal/dlopen
github.com/coreos/go-systemd/v22/journal
github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756
+# github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf
github.com/cri-o/ocicni/pkg/ocicni
# github.com/cyphar/filepath-securejoin v0.2.2
github.com/cyphar/filepath-securejoin
@@ -405,13 +407,13 @@ github.com/morikuni/aec
github.com/mrunalp/fileutils
# github.com/mtrmac/gpgme v0.1.2
github.com/mtrmac/gpgme
-# github.com/nxadm/tail v1.4.4
+# github.com/nxadm/tail v1.4.8
github.com/nxadm/tail
github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
-# github.com/onsi/ginkgo v1.15.0
+# github.com/onsi/ginkgo v1.15.1
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/extensions/table
@@ -440,7 +442,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.10.5
+# github.com/onsi/gomega v1.11.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
@@ -487,10 +489,6 @@ github.com/openshift/imagebuilder/dockerfile/command
github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/signal
github.com/openshift/imagebuilder/strslice
-# github.com/opentracing/opentracing-go v1.2.0
-github.com/opentracing/opentracing-go
-github.com/opentracing/opentracing-go/ext
-github.com/opentracing/opentracing-go/log
# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -516,7 +514,8 @@ github.com/prometheus/common/model
# github.com/prometheus/procfs v0.0.3
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
-# github.com/rootless-containers/rootlesskit v0.13.2
+# github.com/rootless-containers/rootlesskit v0.14.0-beta.0
+github.com/rootless-containers/rootlesskit/pkg/api
github.com/rootless-containers/rootlesskit/pkg/msgutil
github.com/rootless-containers/rootlesskit/pkg/port
github.com/rootless-containers/rootlesskit/pkg/port/builtin
@@ -549,26 +548,12 @@ github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
# github.com/uber/jaeger-client-go v2.25.0+incompatible
-github.com/uber/jaeger-client-go
-github.com/uber/jaeger-client-go/config
-github.com/uber/jaeger-client-go/internal/baggage
-github.com/uber/jaeger-client-go/internal/baggage/remote
-github.com/uber/jaeger-client-go/internal/reporterstats
-github.com/uber/jaeger-client-go/internal/spanlog
-github.com/uber/jaeger-client-go/internal/throttler
-github.com/uber/jaeger-client-go/internal/throttler/remote
github.com/uber/jaeger-client-go/log
-github.com/uber/jaeger-client-go/rpcmetrics
github.com/uber/jaeger-client-go/thrift
github.com/uber/jaeger-client-go/thrift-gen/agent
-github.com/uber/jaeger-client-go/thrift-gen/baggage
github.com/uber/jaeger-client-go/thrift-gen/jaeger
-github.com/uber/jaeger-client-go/thrift-gen/sampling
github.com/uber/jaeger-client-go/thrift-gen/zipkincore
-github.com/uber/jaeger-client-go/transport
github.com/uber/jaeger-client-go/utils
-# github.com/uber/jaeger-lib v2.2.0+incompatible
-github.com/uber/jaeger-lib/metrics
# github.com/ulikunitz/xz v0.5.9
github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
@@ -606,8 +591,6 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# go.uber.org/atomic v1.7.0
-go.uber.org/atomic
# golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@@ -788,8 +771,6 @@ k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
-k8s.io/client-go/util/homedir
# k8s.io/klog/v2 v2.4.0
k8s.io/klog/v2
# sigs.k8s.io/structured-merge-diff/v4 v4.0.2
diff --git a/version/version.go b/version/version.go
index 520014bb7..6b93ed8ea 100644
--- a/version/version.go
+++ b/version/version.go
@@ -4,13 +4,44 @@ import (
"github.com/blang/semver"
)
+type (
+ // Tree selects which API endpoint tree (libpod or compat) a version applies to
+ Tree int
+ // Level selects which API level within a tree, the current one or the oldest still supported
+ Level int
+)
+
+const (
+ // Libpod supports Libpod endpoints
+ Libpod = Tree(iota)
+ // Compat supports Libpod endpoints
+ Compat
+
+ // CurrentAPI denotes the newest API level this build supports
+ CurrentAPI = Level(iota)
+ // MinimalAPI denotes the oldest API level this build still supports
+ MinimalAPI
+)
+
// Version is the version of the build.
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
var Version = semver.MustParse("3.1.0-dev")
-// APIVersion is the version for the remote
-// client API. It is used to determine compatibility
-// between a remote podman client and its backend
-var APIVersion = semver.MustParse("3.0.0")
+// See https://docs.docker.com/engine/api/v1.40/
+// libpod compat handlers are expected to honor docker API versions
+
+// APIVersion provides the current and minimal API versions for compat and libpod endpoint trees
+// Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow
+// clients to shop for the Version they wish to support
+var APIVersion = map[Tree]map[Level]semver.Version{
+ Libpod: {
+ CurrentAPI: Version,
+ MinimalAPI: semver.MustParse("3.0.0"),
+ },
+ Compat: {
+ CurrentAPI: semver.MustParse("1.40.0"),
+ MinimalAPI: semver.MustParse("1.24.0"),
+ },
+}
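
Illustrative sketch (not part of the diff above): the new APIVersion map is keyed by endpoint tree and level, so a server handler can check whether a client-requested API version is still supported. IsSupported below is a hypothetical helper name, written as if it lived in the same version package; it is not introduced by this commit.

package version

import "github.com/blang/semver"

// IsSupported is a hypothetical helper (not part of this commit).
// It reports whether a client-requested API version lies between the
// minimal and current levels recorded for the given endpoint tree.
func IsSupported(t Tree, requested semver.Version) bool {
	min := APIVersion[t][MinimalAPI]
	cur := APIVersion[t][CurrentAPI]
	return requested.GTE(min) && requested.LTE(cur)
}

For example, IsSupported(Compat, semver.MustParse("1.30.0")) would return true, since 1.30.0 falls between the compat tree's minimal 1.24.0 and current 1.40.0.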