-rw-r--r--.github/workflows/multi-arch-build.yaml42
-rw-r--r--CONTRIBUTING.md6
-rw-r--r--Makefile12
-rw-r--r--README.md4
-rw-r--r--RELEASE_NOTES.md125
-rw-r--r--cmd/podman/README.md6
-rw-r--r--cmd/podman/auto-update.go2
-rw-r--r--cmd/podman/common/completion.go7
-rw-r--r--cmd/podman/common/create_opts.go9
-rw-r--r--cmd/podman/common/netflags.go13
-rw-r--r--cmd/podman/common/util.go4
-rw-r--r--cmd/podman/completion/completion.go2
-rw-r--r--cmd/podman/containers/attach.go2
-rw-r--r--cmd/podman/containers/checkpoint.go28
-rw-r--r--cmd/podman/containers/cleanup.go10
-rw-r--r--cmd/podman/containers/commit.go2
-rw-r--r--cmd/podman/containers/container.go2
-rw-r--r--cmd/podman/containers/cp.go2
-rw-r--r--cmd/podman/containers/create.go6
-rw-r--r--cmd/podman/containers/diff.go1
-rw-r--r--cmd/podman/containers/exec.go24
-rw-r--r--cmd/podman/containers/exists.go8
-rw-r--r--cmd/podman/containers/export.go2
-rw-r--r--cmd/podman/containers/init.go2
-rw-r--r--cmd/podman/containers/inspect.go1
-rw-r--r--cmd/podman/containers/kill.go2
-rw-r--r--cmd/podman/containers/list.go2
-rw-r--r--cmd/podman/containers/logs.go2
-rw-r--r--cmd/podman/containers/mount.go13
-rw-r--r--cmd/podman/containers/pause.go2
-rw-r--r--cmd/podman/containers/port.go2
-rw-r--r--cmd/podman/containers/prune.go1
-rw-r--r--cmd/podman/containers/ps.go2
-rw-r--r--cmd/podman/containers/rename.go2
-rw-r--r--cmd/podman/containers/restart.go3
-rw-r--r--cmd/podman/containers/restore.go25
-rw-r--r--cmd/podman/containers/rm.go14
-rw-r--r--cmd/podman/containers/run.go12
-rw-r--r--cmd/podman/containers/runlabel.go2
-rw-r--r--cmd/podman/containers/start.go2
-rw-r--r--cmd/podman/containers/stats.go2
-rw-r--r--cmd/podman/containers/stop.go2
-rw-r--r--cmd/podman/containers/top.go2
-rw-r--r--cmd/podman/containers/unmount.go24
-rw-r--r--cmd/podman/containers/unpause.go2
-rw-r--r--cmd/podman/containers/wait.go2
-rw-r--r--cmd/podman/diff.go1
-rw-r--r--cmd/podman/generate/generate.go2
-rw-r--r--cmd/podman/generate/kube.go1
-rw-r--r--cmd/podman/generate/systemd.go1
-rw-r--r--cmd/podman/healthcheck/healthcheck.go3
-rw-r--r--cmd/podman/healthcheck/run.go21
-rw-r--r--cmd/podman/images/build.go3
-rw-r--r--cmd/podman/images/diff.go1
-rw-r--r--cmd/podman/images/exists.go3
-rw-r--r--cmd/podman/images/history.go2
-rw-r--r--cmd/podman/images/image.go2
-rw-r--r--cmd/podman/images/images.go32
-rw-r--r--cmd/podman/images/import.go2
-rw-r--r--cmd/podman/images/inspect.go1
-rw-r--r--cmd/podman/images/list.go26
-rw-r--r--cmd/podman/images/load.go2
-rw-r--r--cmd/podman/images/mount.go10
-rw-r--r--cmd/podman/images/prune.go1
-rw-r--r--cmd/podman/images/pull.go2
-rw-r--r--cmd/podman/images/push.go2
-rw-r--r--cmd/podman/images/rm.go18
-rw-r--r--cmd/podman/images/rmi.go29
-rw-r--r--cmd/podman/images/save.go3
-rw-r--r--cmd/podman/images/search.go7
-rw-r--r--cmd/podman/images/sign.go2
-rw-r--r--cmd/podman/images/tag.go28
-rw-r--r--cmd/podman/images/tree.go1
-rw-r--r--cmd/podman/images/trust.go11
-rw-r--r--cmd/podman/images/trust_set.go2
-rw-r--r--cmd/podman/images/trust_show.go2
-rw-r--r--cmd/podman/images/unmount.go2
-rw-r--r--cmd/podman/images/untag.go36
-rw-r--r--cmd/podman/inspect.go1
-rw-r--r--cmd/podman/login.go2
-rw-r--r--cmd/podman/logout.go2
-rw-r--r--cmd/podman/machine/init.go6
-rw-r--r--cmd/podman/machine/list.go16
-rw-r--r--cmd/podman/machine/machine.go2
-rw-r--r--cmd/podman/machine/machine_unsupported.go1
-rw-r--r--cmd/podman/machine/rm.go2
-rw-r--r--cmd/podman/machine/ssh.go2
-rw-r--r--cmd/podman/machine/start.go2
-rw-r--r--cmd/podman/machine/stop.go2
-rw-r--r--cmd/podman/main.go60
-rw-r--r--cmd/podman/manifest/add.go1
-rw-r--r--cmd/podman/manifest/annotate.go2
-rw-r--r--cmd/podman/manifest/create.go1
-rw-r--r--cmd/podman/manifest/exists.go2
-rw-r--r--cmd/podman/manifest/inspect.go17
-rw-r--r--cmd/podman/manifest/manifest.go2
-rw-r--r--cmd/podman/manifest/push.go1
-rw-r--r--cmd/podman/manifest/remove.go17
-rw-r--r--cmd/podman/manifest/rm.go17
-rw-r--r--cmd/podman/networks/connect.go1
-rw-r--r--cmd/podman/networks/create.go1
-rw-r--r--cmd/podman/networks/disconnect.go1
-rw-r--r--cmd/podman/networks/exists.go2
-rw-r--r--cmd/podman/networks/inspect.go1
-rw-r--r--cmd/podman/networks/list.go1
-rw-r--r--cmd/podman/networks/network.go2
-rw-r--r--cmd/podman/networks/prune.go1
-rw-r--r--cmd/podman/networks/reload.go10
-rw-r--r--cmd/podman/networks/rm.go1
-rw-r--r--cmd/podman/play/kube.go1
-rw-r--r--cmd/podman/play/play.go2
-rw-r--r--cmd/podman/pods/create.go4
-rw-r--r--cmd/podman/pods/exists.go3
-rw-r--r--cmd/podman/pods/inspect.go1
-rw-r--r--cmd/podman/pods/kill.go1
-rw-r--r--cmd/podman/pods/pause.go1
-rw-r--r--cmd/podman/pods/pod.go2
-rw-r--r--cmd/podman/pods/prune.go1
-rw-r--r--cmd/podman/pods/ps.go1
-rw-r--r--cmd/podman/pods/restart.go1
-rw-r--r--cmd/podman/pods/rm.go1
-rw-r--r--cmd/podman/pods/start.go1
-rw-r--r--cmd/podman/pods/stats.go1
-rw-r--r--cmd/podman/pods/stop.go1
-rw-r--r--cmd/podman/pods/top.go1
-rw-r--r--cmd/podman/pods/unpause.go1
-rw-r--r--cmd/podman/registry/config.go13
-rw-r--r--cmd/podman/registry/registry.go1
-rw-r--r--cmd/podman/secrets/create.go1
-rw-r--r--cmd/podman/secrets/inspect.go1
-rw-r--r--cmd/podman/secrets/list.go1
-rw-r--r--cmd/podman/secrets/rm.go1
-rw-r--r--cmd/podman/secrets/secret.go2
-rw-r--r--cmd/podman/system/connection.go17
-rw-r--r--cmd/podman/system/connection/add.go2
-rw-r--r--cmd/podman/system/connection/default.go17
-rw-r--r--cmd/podman/system/connection/list.go13
-rw-r--r--cmd/podman/system/connection/remove.go17
-rw-r--r--cmd/podman/system/connection/rename.go17
-rw-r--r--cmd/podman/system/df.go1
-rw-r--r--cmd/podman/system/events.go1
-rw-r--r--cmd/podman/system/info.go3
-rw-r--r--cmd/podman/system/migrate.go2
-rw-r--r--cmd/podman/system/prune.go1
-rw-r--r--cmd/podman/system/renumber.go15
-rw-r--r--cmd/podman/system/reset.go2
-rw-r--r--cmd/podman/system/service.go13
-rw-r--r--cmd/podman/system/service_abi.go2
-rw-r--r--cmd/podman/system/system.go2
-rw-r--r--cmd/podman/system/unshare.go13
-rw-r--r--cmd/podman/system/version.go1
-rw-r--r--cmd/podman/volumes/create.go1
-rw-r--r--cmd/podman/volumes/exists.go2
-rw-r--r--cmd/podman/volumes/inspect.go1
-rw-r--r--cmd/podman/volumes/list.go1
-rw-r--r--cmd/podman/volumes/prune.go1
-rw-r--r--cmd/podman/volumes/rm.go1
-rw-r--r--cmd/podman/volumes/volume.go2
-rw-r--r--contrib/spec/podman.spec.in2
-rw-r--r--docs/MANPAGE_SYNTAX.md119
-rw-r--r--docs/README.md4
-rw-r--r--docs/source/Commands.rst4
-rw-r--r--docs/source/machine.rst5
-rw-r--r--docs/source/markdown/containers-mounts.conf.5.md16
-rw-r--r--docs/source/markdown/podman-attach.1.md36
-rw-r--r--docs/source/markdown/podman-auto-update.1.md55
-rw-r--r--docs/source/markdown/podman-build.1.md4
-rw-r--r--docs/source/markdown/podman-commit.1.md46
-rw-r--r--docs/source/markdown/podman-container-checkpoint.1.md52
-rw-r--r--docs/source/markdown/podman-container-restore.1.md15
-rw-r--r--docs/source/markdown/podman-container-runlabel.1.md4
-rw-r--r--docs/source/markdown/podman-generate-systemd.1.md6
-rw-r--r--docs/source/markdown/podman-image-sign.1.md4
-rw-r--r--docs/source/markdown/podman-login.1.md4
-rw-r--r--docs/source/markdown/podman-manifest-add.1.md26
-rw-r--r--docs/source/markdown/podman-manifest-push.1.md4
-rw-r--r--docs/source/markdown/podman-play-kube.1.md4
-rw-r--r--docs/source/markdown/podman-pull.1.md4
-rw-r--r--docs/source/markdown/podman-push.1.md4
-rw-r--r--docs/source/markdown/podman-system-service.1.md4
-rw-r--r--docs/tutorials/mac_experimental.md99
-rw-r--r--docs/tutorials/podman-go-bindings.md12
-rw-r--r--docs/tutorials/remote_client.md4
-rw-r--r--go.mod16
-rw-r--r--go.sum34
-rw-r--r--libpod/container.go3
-rw-r--r--libpod/container_api.go4
-rw-r--r--libpod/container_internal_linux.go36
-rw-r--r--libpod/container_log_linux.go276
-rw-r--r--libpod/events/filters.go39
-rw-r--r--libpod/events/journal_linux.go10
-rw-r--r--libpod/events/logfile.go10
-rw-r--r--libpod/network/config.go43
-rw-r--r--libpod/network/netconflist.go18
-rw-r--r--libpod/networking_linux.go24
-rw-r--r--libpod/runtime.go17
-rw-r--r--libpod/runtime_ctr.go9
-rw-r--r--pkg/api/handlers/compat/containers_create.go7
-rw-r--r--pkg/api/handlers/compat/events.go2
-rw-r--r--pkg/api/handlers/compat/images.go76
-rw-r--r--pkg/api/handlers/compat/images_push.go112
-rw-r--r--pkg/api/handlers/compat/resize.go15
-rw-r--r--pkg/api/handlers/libpod/images.go2
-rw-r--r--pkg/api/handlers/libpod/images_pull.go4
-rw-r--r--pkg/api/handlers/libpod/manifests.go2
-rw-r--r--pkg/api/handlers/libpod/swagger.go2
-rw-r--r--pkg/api/handlers/utils/images.go25
-rw-r--r--pkg/api/server/handler_api.go6
-rw-r--r--pkg/api/server/register_containers.go2
-rw-r--r--pkg/api/server/register_networks.go4
-rw-r--r--pkg/api/server/server.go21
-rw-r--r--pkg/bindings/containers/attach.go54
-rw-r--r--pkg/bindings/images/build.go2
-rw-r--r--pkg/checkpoint/checkpoint_restore.go9
-rw-r--r--pkg/domain/entities/containers.go3
-rw-r--r--pkg/domain/entities/events.go28
-rw-r--r--pkg/domain/entities/system.go7
-rw-r--r--pkg/domain/infra/abi/containers.go1
-rw-r--r--pkg/domain/infra/abi/network.go2
-rw-r--r--pkg/machine/config.go1
-rw-r--r--pkg/machine/ignition.go17
-rw-r--r--pkg/machine/qemu/machine.go120
-rw-r--r--pkg/machine/qemu/options_darwin.go2
-rw-r--r--pkg/machine/qemu/options_darwin_amd64.go2
-rw-r--r--pkg/machine/qemu/options_linux.go10
-rw-r--r--pkg/rootless/rootless_linux.c14
-rw-r--r--pkg/specgen/generate/kube/kube.go14
-rw-r--r--pkg/specgen/generate/namespaces.go2
-rw-r--r--pkg/specgen/generate/pod_create.go2
-rw-r--r--pkg/specgen/generate/ports.go53
-rw-r--r--pkg/specgen/namespaces.go8
-rw-r--r--pkg/systemd/generate/common.go13
-rw-r--r--pkg/systemd/generate/common_test.go24
-rw-r--r--pkg/systemd/generate/containers.go49
-rw-r--r--pkg/systemd/generate/containers_test.go164
-rw-r--r--pkg/systemd/generate/pods_test.go10
-rw-r--r--rootless.md2
-rw-r--r--test/apiv2/20-containers.at9
-rw-r--r--test/apiv2/python/rest_api/fixtures/api_testcase.py2
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_container.py4
-rw-r--r--test/e2e/checkpoint_test.go154
-rw-r--r--test/e2e/common_test.go18
-rw-r--r--test/e2e/events_test.go13
-rw-r--r--test/e2e/generate_systemd_test.go4
-rw-r--r--test/e2e/logs_test.go2
-rw-r--r--test/e2e/network_test.go26
-rw-r--r--test/e2e/play_kube_test.go2
-rw-r--r--test/e2e/run_device_test.go6
-rw-r--r--test/e2e/run_networking_test.go14
-rw-r--r--test/system/001-basic.bats8
-rw-r--r--test/system/030-run.bats6
-rw-r--r--test/system/035-logs.bats52
-rw-r--r--test/system/045-start.bats2
-rw-r--r--test/system/070-build.bats14
-rw-r--r--test/system/090-events.bats30
-rw-r--r--test/system/130-kill.bats3
-rw-r--r--test/system/255-auto-update.bats274
-rw-r--r--test/system/450-interactive.bats3
-rw-r--r--test/system/500-networking.bats62
-rw-r--r--test/system/700-play.bats41
-rw-r--r--transfer.md116
-rw-r--r--troubleshooting.md29
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/.gitignore (renamed from vendor/github.com/willf/bitset/.gitignore)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/.travis.yml (renamed from vendor/github.com/willf/bitset/.travis.yml)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/LICENSE (renamed from vendor/github.com/willf/bitset/LICENSE)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/README.md (renamed from vendor/github.com/willf/bitset/README.md)11
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml (renamed from vendor/github.com/willf/bitset/azure-pipelines.yml)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/bitset.go (renamed from vendor/github.com/willf/bitset/bitset.go)23
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/go.mod3
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/go.sum (renamed from vendor/github.com/willf/bitset/go.sum)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/popcnt.go (renamed from vendor/github.com/willf/bitset/popcnt.go)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/popcnt_19.go (renamed from vendor/github.com/willf/bitset/popcnt_19.go)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go (renamed from vendor/github.com/willf/bitset/popcnt_amd64.go)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s (renamed from vendor/github.com/willf/bitset/popcnt_amd64.s)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go (renamed from vendor/github.com/willf/bitset/popcnt_generic.go)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go (renamed from vendor/github.com/willf/bitset/trailing_zeros_18.go)0
-rw-r--r--vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go (renamed from vendor/github.com/willf/bitset/trailing_zeros_19.go)0
-rw-r--r--vendor/github.com/containers/common/libimage/image.go19
-rw-r--r--vendor/github.com/containers/common/libimage/normalize.go51
-rw-r--r--vendor/github.com/containers/common/libimage/pull.go24
-rw-r--r--vendor/github.com/containers/common/libimage/runtime.go17
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go5
-rw-r--r--vendor/github.com/containers/common/pkg/config/containers.conf3
-rw-r--r--vendor/github.com/containers/common/pkg/config/default.go50
-rw-r--r--vendor/github.com/containers/common/pkg/config/nosystemd.go10
-rw-r--r--vendor/github.com/containers/common/pkg/config/systemd.go40
-rw-r--r--vendor/github.com/containers/common/pkg/defaultnet/default_network.go222
-rw-r--r--vendor/github.com/containers/common/version/version.go2
-rw-r--r--vendor/github.com/containers/storage/VERSION2
-rw-r--r--vendor/github.com/containers/storage/containers.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/devmapper/deviceset.go4
-rw-r--r--vendor/github.com/containers/storage/drivers/driver.go7
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go46
-rw-r--r--vendor/github.com/containers/storage/go.mod2
-rw-r--r--vendor/github.com/containers/storage/go.sum4
-rw-r--r--vendor/github.com/containers/storage/images.go2
-rw-r--r--vendor/github.com/containers/storage/layers.go134
-rw-r--r--vendor/github.com/containers/storage/store.go34
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive.go16
-rw-r--r--vendor/github.com/klauspost/compress/flate/deflate.go2
-rw-r--r--vendor/github.com/klauspost/compress/flate/fast_encoder.go2
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go246
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_code.go58
-rw-r--r--vendor/github.com/klauspost/compress/flate/token.go41
-rw-r--r--vendor/github.com/klauspost/compress/zstd/README.md22
-rw-r--r--vendor/github.com/klauspost/compress/zstd/blockdec.go11
-rw-r--r--vendor/github.com/klauspost/compress/zstd/bytebuf.go21
-rw-r--r--vendor/github.com/klauspost/compress/zstd/framedec.go46
-rw-r--r--vendor/github.com/klauspost/compress/zstd/zip.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/CHANGELOG.md10
-rw-r--r--vendor/github.com/onsi/ginkgo/RELEASING.md11
-rw-r--r--vendor/github.com/onsi/ginkgo/config/config.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo_dsl.go4
-rw-r--r--vendor/github.com/onsi/ginkgo/go.mod2
-rw-r--r--vendor/github.com/onsi/ginkgo/types/deprecation_support.go4
-rw-r--r--vendor/github.com/onsi/gomega/CHANGELOG.md6
-rw-r--r--vendor/github.com/onsi/gomega/env.go40
-rw-r--r--vendor/github.com/onsi/gomega/gomega_dsl.go2
-rw-r--r--vendor/github.com/onsi/gomega/internal/defaults/env.go22
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go6
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go (renamed from vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go)11
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go607
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go21
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go6
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go242
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go (renamed from vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go)11
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go2698
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go2106
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go6
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go1337
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go446
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go (renamed from vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go)10
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go1853
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/README.md10
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go100
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go337
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/client.go109
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go216
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/configuration.go378
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/context.go (renamed from vendor/github.com/uber/jaeger-client-go/thrift/processor.go)10
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/exception.go86
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/header_context.go110
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go351
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go810
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/logger.go59
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go9
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/numeric.go4
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go80
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/protocol.go148
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go44
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go94
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go8
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/serializer.go87
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go34
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go326
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go332
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport.go12
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go65
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go6
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/udp_client.go8
-rw-r--r--vendor/github.com/willf/bitset/go.mod3
-rw-r--r--vendor/go.etcd.io/bbolt/.gitignore2
-rw-r--r--vendor/go.etcd.io/bbolt/.travis.yml3
-rw-r--r--vendor/go.etcd.io/bbolt/Makefile2
-rw-r--r--vendor/go.etcd.io/bbolt/README.md5
-rw-r--r--vendor/go.etcd.io/bbolt/bolt_unix.go17
-rw-r--r--vendor/go.etcd.io/bbolt/compact.go114
-rw-r--r--vendor/go.etcd.io/bbolt/db.go66
-rw-r--r--vendor/go.etcd.io/bbolt/freelist_hmap.go6
-rw-r--r--vendor/go.etcd.io/bbolt/go.mod2
-rw-r--r--vendor/go.etcd.io/bbolt/go.sum4
-rw-r--r--vendor/go.etcd.io/bbolt/mlock_unix.go36
-rw-r--r--vendor/go.etcd.io/bbolt/mlock_windows.go11
-rw-r--r--vendor/go.etcd.io/bbolt/tx.go3
-rw-r--r--vendor/modules.txt24
-rw-r--r--version/version.go2
377 files changed, 12032 insertions, 6688 deletions
diff --git a/.github/workflows/multi-arch-build.yaml b/.github/workflows/multi-arch-build.yaml
index 0f8a3df7e..2ade44ee6 100644
--- a/.github/workflows/multi-arch-build.yaml
+++ b/.github/workflows/multi-arch-build.yaml
@@ -2,6 +2,11 @@
# Please see contrib/podmanimage/README.md for details on the intentions
# of this workflow.
+#
+# BIG FAT WARNING: This workflow is duplicated across containers/skopeo,
+# containers/buildah, and containers/podman. ANY AND
+# ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED
+# TO THE OTHER REPOS.
name: build multi-arch images
@@ -86,7 +91,7 @@ jobs:
docker://$PODMAN_QUAY_REGISTRY/stable | \
jq -r '.Tags[]')
- # New image? Push quay.io/podman/stable:vX.X.X and :latest
+ # New version? Push quay.io/podman/stable:vX.X.X and :latest
if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then
# Assume version-tag is also the most up to date (i.e. "latest")
FQIN="$PODMAN_QUAY_REGISTRY/stable:$VERSION,$PODMAN_QUAY_REGISTRY/stable:latest"
@@ -108,31 +113,24 @@ jobs:
echo "::set-output name=fqin::${FQIN}"
echo '::set-output name=push::true'
- # This is substantially the same as the above step, except the
- # $CONTAINERS_QUAY_REGISTRY is used and the "testing"
- # flavor is never pushed.
+ # This is substantially similar to the above logic,
+ # but only handles $CONTAINERS_QUAY_REGISTRY for
+ # the stable "latest" and named-version tagged images.
- name: Generate containers reg. image FQIN(s)
- if: matrix.source != 'testing'
+ if: matrix.source == 'stable'
id: containers_reg
run: |
- if [[ "${{ matrix.source }}" == 'stable' ]]; then
- VERSION='v${{ steps.sniff_test.outputs.version }}'
- # workaround vim syntax-highlight bug: '
- ALLTAGS=$(skopeo list-tags \
- docker://$CONTAINERS_QUAY_REGISTRY/podman | \
- jq -r '.Tags[]')
-
- # New image? Push quay.io/containers/podman:vX.X.X and :latest
- if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then
- FQIN="$CONTAINERS_QUAY_REGISTRY/podman:$VERSION,$CONTAINERS_QUAY_REGISTRY/podman:latest"
- else # Not a new version-tagged image, but contents may be updated
- FQIN="$CONTAINERS_QUAY_REGISTRY/podman:latest"
- fi
- elif [[ "${{ matrix.source }}" == 'upstream' ]]; then
+ VERSION='v${{ steps.sniff_test.outputs.version }}'
+ # workaround vim syntax-highlight bug: '
+ ALLTAGS=$(skopeo list-tags \
+ docker://$CONTAINERS_QUAY_REGISTRY/podman | \
+ jq -r '.Tags[]')
+
+ # New version? Push quay.io/containers/podman:vX.X.X and latest
+ if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then
+ FQIN="$CONTAINERS_QUAY_REGISTRY/podman:$VERSION,$CONTAINERS_QUAY_REGISTRY/podman:latest"
+ else # Not a new version-tagged image, only update latest.
FQIN="$CONTAINERS_QUAY_REGISTRY/podman:latest"
- else
- echo "::error::Unknown matrix item '${{ matrix.source }}'"
- exit 1
fi
echo "::warning::Pushing $FQIN"
echo "::set-output name=fqin::${FQIN}"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cb8bce9ed..30bbe3045 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -126,6 +126,10 @@ Well, you can now create your own branch, apply changes on it, and then submitti
For further reading about branching [you can read this document](https://herve.beraud.io/containers/linux/podman/isolate/environment/2019/02/06/how-to-hack-on-podman.html).
+## Documentation
+
+Make sure to update the documentation if needed. You can find the man pages under `docs/source/markdown`. The syntax for the formatting of all man pages can be found [here](docs/MANPAGE_SYNTAX.md).
+
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
@@ -393,7 +397,7 @@ if possible.](#communications)
## Communications
For general questions and discussion, please use the
-IRC `#podman` channel on `irc.freenode.net`.
+IRC `#podman` channel on `irc.libera.chat`.
For discussions around issues/bugs and features, you can use the GitHub
[issues](https://github.com/containers/podman/issues)
diff --git a/Makefile b/Makefile
index 15d6d9fb6..4a7c727de 100644
--- a/Makefile
+++ b/Makefile
@@ -428,8 +428,16 @@ pkg/api/swagger.yaml: .gopathok
make -C pkg/api
$(MANPAGES): %: %.md .install.md2man docdir
- @sed -e 's/\((podman[^)]*\.md)\)//g' -e 's/\[\(podman[^]]*\)\]/\1/g' \
- -e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' $< | \
+
+### sed is used to filter http/s links as well as relative links
+### replaces "\" at the end of a line with two spaces
+### this ensures that manpages are rendered correctly
+
+ @sed -e 's/\((podman[^)]*\.md\(#.*\)\?)\)//g' \
+ -e 's/\[\(podman[^]]*\)\]/\1/g' \
+ -e 's/\[\([^]]*\)](http[^)]\+)/\1/g' \
+ -e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' \
+ -e 's/\\$$/  /g' $< | \
$(GOMD2MAN) -in /dev/stdin -out $(subst source/markdown,build/man,$@)
.PHONY: docdir
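The sed expressions added above strip Markdown link syntax (relative podman-*.md targets and http/https links) and clean up trailing backslashes before the text is fed to go-md2man. As a rough illustration of that link filtering only (not part of the build; the sample input line is invented), an equivalent in Go:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical man-page source line.
	line := "See [podman-run(1)](podman-run.1.md#options) and [the docs](https://docs.podman.io)."

	// Drop "(podman-*.md...)" link targets, keeping the bracketed text.
	line = regexp.MustCompile(`\(podman[^)]*\.md(#[^)]*)?\)`).ReplaceAllString(line, "")
	// Unwrap "[podman-...]" brackets.
	line = regexp.MustCompile(`\[(podman[^]]*)\]`).ReplaceAllString(line, "$1")
	// Unwrap "[text](http...)" links, keeping only the text.
	line = regexp.MustCompile(`\[([^]]*)\]\(http[^)]+\)`).ReplaceAllString(line, "$1")

	fmt.Println(line) // See podman-run(1) and the docs.
}
```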
diff --git a/README.md b/README.md
index 829d50eb2..db6fc3b5d 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
Podman (the POD MANager) is a tool for managing containers and images, volumes mounted into those containers, and pods made from groups of containers.
Podman is based on libpod, a library for container lifecycle management that is also contained in this repository. The libpod library provides APIs for managing containers, pods, container images, and volumes.
-* [Latest Version: 3.1.0](https://github.com/containers/podman/releases/latest)
+* [Latest Version: 3.2.0](https://github.com/containers/podman/releases/latest)
* Latest Remote client for Windows
* Latest Remote client for MacOs
* Latest Static Remote client for Linux
@@ -42,7 +42,7 @@ If you think you've identified a security issue in the project, please *DO NOT*
Instead, send an email with as many details as possible to `security@lists.podman.io`. This is a private mailing list for the core maintainers.
For general questions and discussion, please use the
-IRC `#podman` channel on `irc.freenode.net`.
+IRC `#podman` channel on `irc.libera.chat`.
For discussions around issues/bugs and features, you can use the GitHub
[issues](https://github.com/containers/podman/issues)
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 5ba5e251b..c6efff5dd 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,130 @@
# Release Notes
+## 3.2.0
+### Features
+- Docker Compose is now supported with rootless Podman ([#9169](https://github.com/containers/podman/issues/9169)).
+- The `podman network connect`, `podman network disconnect`, and `podman network reload` commands have been enabled for rootless Podman.
+- An experimental new set of commands, `podman machine`, was added to assist in managing virtual machines containing a Podman server. These are intended for easing the use of Podman on OS X by handling the creation of a Linux VM for running Podman.
+- The `podman generate kube` command can now be run on Podman named volumes (generating `PersistentVolumeClaim` YAML), in addition to pods and containers.
+- The `podman play kube` command now supports two new options, `--ip` and `--mac`, to set static IPs and MAC addresses for created pods ([#8442](https://github.com/containers/podman/issues/8442) and [#9731](https://github.com/containers/podman/issues/9731)).
+- The `podman play kube` command's support for `PersistentVolumeClaim` YAML has been greatly improved.
+- The `podman generate kube` command now preserves the label used by `podman auto-update` to identify containers to update as a Kubernetes annotation, and the `podman play kube` command will convert this annotation back into a label. This allows `podman auto-update` to be used with containers created by `podman play kube`.
+- The `podman play kube` command now supports Kubernetes `secretRef` YAML (using the secrets support from `podman secret`) for environment variables.
+- Secrets can now be added to containers as environment variables using the `type=env` option to the `--secret` flag to `podman create` and `podman run`.
+- The `podman start` command now supports the `--all` option, allowing all containers to be started simultaneously with a single command. The `--filter` option has also been added to filter which containers to start when `--all` is used.
+- Filtering containers with the `--filter` option to `podman ps` and `podman start` now supports a new filter, `restart-policy`, to filter containers based on their restart policy.
+- The `--group-add` option to rootless `podman run` and `podman create` now accepts a new value, `keep-groups`, which instructs Podman to retain the supplemental groups of the user running Podman in the created container. This is only supported with the `crun` OCI runtime.
+- The `podman run` and `podman create` commands now support a new option, `--timeout`. This sets a maximum time the container is allowed to run, after which it is killed ([#6412](https://github.com/containers/podman/issues/6412)).
+- The `podman run` and `podman create` commands now support a new option, `--pidfile`. This will create a file when the container is started containing the PID of the first process in the container.
+- The `podman run` and `podman create` commands now support a new option, `--requires`. The `--requires` option adds dependency containers - containers that must be running before the current container. Commands like `podman start` will automatically start the requirements of a container before starting the container itself.
+- Auto-updating containers can now be done with locally-built images, not just images hosted on a registry, by creating containers with the `io.containers.autoupdate` label set to `local`.
+- Podman now supports the [Container Device Interface](https://github.com/container-orchestrated-devices/container-device-interface) (CDI) standard.
+- Podman now adds an entry to `/etc/hosts`, `host.containers.internal`, pointing to the current gateway (which, for root containers, is usually a bridge interface on the host system) ([#5651](https://github.com/containers/podman/issues/5651)).
+- The `podman ps`, `podman pod ps`, `podman network list`, `podman secret list`, and `podman volume list` commands now support a `--noheading` option, which will cause Podman to omit the heading line including column names.
+- The `podman unshare` command now supports a new flag, `--rootless-cni`, to join the rootless network namespace. This allows commands to be run in the same network environment as rootless containers with CNI networking.
+- The `--security-opt unmask=` option to `podman run` and `podman create` now supports glob operations to unmask a group of paths at once (e.g. `podman run --security-opt unmask=/proc/* ...` will unmask all paths in `/proc` in the container).
+- The `podman network prune` command now supports a `--filter` option to filter which networks will be pruned.
+
+### Changes
+- The change in Podman 3.1.2 where the `:z` and `:Z` mount options for volumes were ignored for privileged containers has been reverted after discussion in [#10209](https://github.com/containers/podman/issues/10209).
+- Podman's rootless CNI functionality no longer requires a sidecar container! The removal of the requirement for the `rootless-cni-infra` container means that rootless CNI is now usable on all architectures, not just AMD64, and no longer requires pulling an image ([#8709](https://github.com/containers/podman/issues/8709)).
+- The Image handling code used by Podman has seen a major rewrite to improve code sharing with our other projects, Buildah and CRI-O. This should result in fewer bugs and performance gains in the long term. Work on this is still ongoing.
+- The `podman auto-update` command now prunes previous versions of images after updating if they are unused, to prevent disk exhaustion after repeated updates ([#10190](https://github.com/containers/podman/issues/10190)).
+- The `podman play kube` command now treats environment variables configured as references to a `ConfigMap` as mandatory unless the `optional` parameter was set; this better matches the behavior of Kubernetes.
+- Podman now supports the `--context=default` flag from Docker as a no-op for compatibility purposes.
+- When Podman is run as root, but without `CAP_SYS_ADMIN` being available, it will run in a user namespace using the same code as rootless Podman (instead of failing outright).
+- The `podman info` command now includes the path of the Seccomp profile Podman is using, available cgroup controllers, and whether Podman is connected to a remote service or running containers locally.
+- Containers created with the `--rm` option now automatically use the `volatile` storage flag when available for their root filesystems; because these containers are removed when they exit anyway, changes do not need to be written to disk as often. This should result in improved performance.
+- The `podman generate systemd --new` command will now include environment variables referenced by the container in generated unit files if the value would be looked up from the system environment.
+- Podman now requires that Conmon v2.0.24 be available.
+
+### Bugfixes
+- Fixed a bug where the remote Podman client's `podman build` command did not support the `--arch`, `--platform`, and `--os` options.
+- Fixed a bug where the remote Podman client's `podman build` command ignored the `--rm=false` option ([#9869](https://github.com/containers/podman/issues/9869)).
+- Fixed a bug where the remote Podman client's `podman build --iidfile` command could include extra output (in addition to just the image ID) in the image ID file written ([#10233](https://github.com/containers/podman/issues/10233)).
+- Fixed a bug where the remote Podman client's `podman build` command did not preserve hardlinks when moving files into the container via `COPY` instructions ([#9893](https://github.com/containers/podman/issues/9893)).
+- Fixed a bug where the `podman generate systemd --new` command could generate extra `--iidfile` arguments if the container was already created with one.
+- Fixed a bug where the `podman generate systemd --new` command would generate unit files that did not include `RequiresMountsFor` lines ([#10493](https://github.com/containers/podman/issues/10493)).
+- Fixed a bug where the `podman generate kube` command produced incorrect YAML for containers which bind-mounted both `/` and `/root` from the host system into the container ([#9764](https://github.com/containers/podman/issues/9764)).
+- Fixed a bug where pods created by `podman play kube` from YAML that specified `ShareProcessNamespace` would only share the PID namespace (and not also the UTS, Network, and IPC namespaces) ([#9128](https://github.com/containers/podman/issues/9128)).
+- Fixed a bug where the `podman network reload` command could generate spurious error messages when `iptables-nft` was in use.
+- Fixed a bug where rootless Podman could fail to attach to containers when the user running Podman had a large UID.
+- Fixed a bug where the `podman ps` command could fail with a `no such container` error due to a race condition with container removal ([#10120](https://github.com/containers/podman/issues/10120)).
+- Fixed a bug where containers using the `slirp4netns` network mode and setting a custom `slirp4netns` subnet while using the `rootlesskit` port forwarder would not be able to forward ports ([#9828](https://github.com/containers/podman/issues/9828)).
+- Fixed a bug where the `--filter ancestor=` option to `podman ps` did not require an exact match of the image name/ID to include a container in its results.
+- Fixed a bug where the `--filter until=` option to `podman image prune` would prune images created after the specified time (instead of before).
+- Fixed a bug where setting a custom Seccomp profile via the `seccomp_profile` option in `containers.conf` had no effect, and the default profile was used instead.
+- Fixed a bug where the `--cgroup-parent` option to `podman create` and `podman run` was ignored in rootless Podman on cgroups v2 systems with the `cgroupfs` cgroup manager ([#10173](https://github.com/containers/podman/issues/10173)).
+- Fixed a bug where the `IMAGE` and `NAME` variables in `podman container runlabel` were not being correctly substituted ([#10192](https://github.com/containers/podman/issues/10192)).
+- Fixed a bug where Podman could freeze when creating containers with a specific combination of volumes and working directory ([#10216](https://github.com/containers/podman/issues/10216)).
+- Fixed a bug where rootless Podman containers restarted by restart policy (e.g. containers created with `--restart=always`) would lose networking after being restarted ([#8047](https://github.com/containers/podman/issues/8047)).
+- Fixed a bug where the `podman cp` command could not copy files into containers created with the `--pid=host` flag ([#9985](https://github.com/containers/podman/issues/9985)).
+- Fixed a bug where filters to the `podman events` command could not be specified twice (if a filter is specified more than once, it will match if any of the given values match - logical or) ([#10507](https://github.com/containers/podman/issues/10507)).
+- Fixed a bug where Podman would include IPv6 nameservers in `resolv.conf` in containers without IPv6 connectivity ([#10158](https://github.com/containers/podman/issues/10158)).
+- Fixed a bug where containers could not be created with static IP addresses when connecting to a network using the `macvlan` driver ([#10283](https://github.com/containers/podman/issues/10283)).
+
+### API
+- Fixed a bug where the Compat Create endpoint for Containers did not allow advanced network options to be set ([#10110](https://github.com/containers/podman/issues/10110)).
+- Fixed a bug where the Compat Create endpoint for Containers ignored static IP information provided in the `IPAMConfig` block ([#10245](https://github.com/containers/podman/issues/10245)).
+- Fixed a bug where the Compat Inspect endpoint for Containers returned null (instead of an empty list) for Networks when the container was not joined to a CNI network ([#9837](https://github.com/containers/podman/issues/9837)).
+- Fixed a bug where the Compat Wait endpoint for Containers could miss containers exiting if they were immediately restarted.
+- Fixed a bug where the Compat Create endpoint for Volumes required that the user provide a name for the new volume ([#9803](https://github.com/containers/podman/issues/9803)).
+- Fixed a bug where the Libpod Info handler would sometimes not return the correct path to the Podman API socket.
+- Fixed a bug where the Compat Events handler used the wrong name for container exited events (`died` instead of `die`) ([#10168](https://github.com/containers/podman/issues/10168)).
+- Fixed a bug where the Compat Push endpoint for Images could leak goroutines if the remote end closed the connection prematurely.
+
+### Misc
+- Updated Buildah to v1.21.0
+- Updated the containers/common library to v0.38.5
+- Updated the containers/storage library to v1.31.3
+
+## 3.1.2
+### Bugfixes
+- The Compat Export endpoint for Images now supports exporting multiple images at the same time to a single archive.
+- Fixed a bug where images with empty layers were stored incorrectly, causing them to be unable to be pushed or saved.
+- Fixed a bug where the `podman rmi` command could fail to remove corrupt images from storage.
+- Fixed a bug where the remote Podman client's `podman save` command did not support the `oci-dir` and `docker-dir` formats ([#9742](https://github.com/containers/podman/issues/9742)).
+- Fixed a bug where volume mounts from `podman play kube` created with a trailing `/` in the container path were not properly superseding named volumes from the image ([#9618](https://github.com/containers/podman/issues/9618)).
+- Fixed a bug where Podman could fail to build on 32-bit architectures.
+
+### Misc
+- Updated the containers/image library to v5.11.1
+
+## 3.1.1
+### Changes
+- Podman now recognizes `trace` as a valid argument to the `--log-level` option. Trace logging is now the most verbose level of logging available.
+- The `:z` and `:Z` options for volume mounts are now ignored when the container is privileged or is run with SELinux isolation disabled (`--security-opt label=disable`). This better matches Docker's behavior in this case.
+
+### Bugfixes
+- Fixed a bug where pruning images with the `podman image prune` or `podman system prune` commands could cause Podman to panic.
+- Fixed a bug where the `podman save` command did not properly error when the `--compress` flag was used with incompatible format types.
+- Fixed a bug where the `--security-opt` and `--ulimit` options to the remote Podman client's `podman build` command were nonfunctional.
+- Fixed a bug where the `--log-rusage` option to the remote Podman client's `podman build` command was nonfunctional ([#9489](https://github.com/containers/podman/issues/9889)).
+- Fixed a bug where the `podman build` command could, in some circumstances, use the wrong OCI runtime ([#9459](https://github.com/containers/podman/issues/9459)).
+- Fixed a bug where the remote Podman client's `podman build` command could return 0 despite failing ([#10029](https://github.com/containers/podman/issues/10029)).
+- Fixed a bug where the `podman container runlabel` command did not properly expand the `IMAGE` and `NAME` variables in the label ([#9405](https://github.com/containers/podman/issues/9405)).
+- Fixed a bug where poststop OCI hooks would be executed twice on containers started with the `--rm` argument ([#9983](https://github.com/containers/podman/issues/9983)).
+- Fixed a bug where rootless Podman could fail to launch containers on cgroups v2 systems when the `cgroupfs` cgroup manager was in use.
+- Fixed a bug where the `podman stats` command could error when statistics tracked exceeded the maximum size of a 32-bit signed integer ([#9979](https://github.com/containers/podman/issues/9979)).
+- Fixed a bug where rootless Podman containers run with `--userns=keepid` (without a `--user` flag in addition) would grant exec sessions run in them too many capabilities ([#9919](https://github.com/containers/podman/issues/9919)).
+- Fixed a bug where the `--authfile` option to `podman build` did not validate that the path given existed ([#9572](https://github.com/containers/podman/issues/9572)).
+- Fixed a bug where the `--storage-opt` option to Podman was appending to, instead of overriding (as is documented), the default storage options.
+- Fixed a bug where the `podman system service` connection did not function properly when run in a socket-activated systemd unit file as a non-root user.
+- Fixed a bug where the `--network` option to the `podman play kube` command of the remote Podman client was being ignored ([#9698](https://github.com/containers/podman/issues/9698)).
+- Fixed a bug where the `--log-driver` option to the `podman play kube` command was nonfunctional ([#10015](https://github.com/containers/podman/issues/10015)).
+
+### API
+- Fixed a bug where the Libpod Create endpoint for Manifests did not properly validate the image the manifest was being created with.
+- Fixed a bug where the Libpod DF endpoint could, in error cases, append an extra null to the JSON response, causing decode errors.
+- Fixed a bug where the Libpod and Compat Top endpoint for Containers would return process names that included extra whitespace.
+- Fixed a bug where the Compat Prune endpoint for Containers accepted too many types of filter.
+
+### Misc
+- Updated Buildah to v1.20.1
+- Updated the containers/storage library to v1.29.0
+- Updated the containers/image library to v5.11.0
+- Updated the containers/common library to v0.36.0
+
## 3.1.0
### Features
- A set of new commands has been added to manage secrets! The `podman secret create`, `podman secret inspect`, `podman secret ls` and `podman secret rm` commands have been added to handle secrets, along with the `--secret` option to `podman run` and `podman create` to add secrets to containers. The initial driver for secrets does not support encryption - this will be added in a future release.
diff --git a/cmd/podman/README.md b/cmd/podman/README.md
index 260c9bcfc..f2aa3f018 100644
--- a/cmd/podman/README.md
+++ b/cmd/podman/README.md
@@ -40,9 +40,6 @@ var (
func init() {
// Subscribe command to podman
registry.Commands = append(registry.Commands, registry.CliCommand{
- // _podman manifest_ will support both ABIMode and TunnelMode
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- // The definition for this command
Command: manifestCmd,
})
}
@@ -83,9 +80,6 @@ var (
func init() {
// Subscribe inspect sub command to manifest command
registry.Commands = append(registry.Commands, registry.CliCommand{
- // _podman manifest inspect_ will support both ABIMode and TunnelMode
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- // The definition for this command
Command: inspectCmd,
// The parent command to proceed this command on the CLI
Parent: manifestCmd,
diff --git a/cmd/podman/auto-update.go b/cmd/podman/auto-update.go
index a6d990b0c..99226790f 100644
--- a/cmd/podman/auto-update.go
+++ b/cmd/podman/auto-update.go
@@ -21,6 +21,7 @@ var (
or similar units that create new containers in order to run the updated images.
Please refer to the podman-auto-update(1) man page for details.`
autoUpdateCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "auto-update [options]",
Short: "Auto update containers according to their auto-update policy",
Long: autoUpdateDescription,
@@ -33,7 +34,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: autoUpdateCommand,
})
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index de5b2995a..c93f2017c 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -1211,3 +1211,10 @@ func AutocompleteVolumeFilters(cmd *cobra.Command, args []string, toComplete str
}
return completeKeyValues(toComplete, kv)
}
+
+// AutocompleteCheckpointCompressType - Autocomplete checkpoint compress type options.
+// -> "gzip", "none", "zstd"
+func AutocompleteCheckpointCompressType(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ types := []string{"gzip", "none", "zstd"}
+ return types, cobra.ShellCompDirectiveNoFileComp
+}
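Completion functions with this signature are attached to a flag through cobra's RegisterFlagCompletionFunc, which is how the checkpoint command wires up the new --compress values later in this diff. A minimal, self-contained sketch (the "demo" command is invented for illustration):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	cmd.Flags().StringP("compress", "c", "zstd", "Select compression algorithm (gzip, none, zstd)")

	// Shell completion for --compress offers the same fixed set of values
	// that AutocompleteCheckpointCompressType returns above.
	_ = cmd.RegisterFlagCompletionFunc("compress",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"gzip", "none", "zstd"}, cobra.ShellCompDirectiveNoFileComp
		})

	_ = cmd.Execute()
}
```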
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 77ac781a5..66778f519 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -8,6 +8,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/pkg/api/handlers"
"github.com/containers/podman/v3/pkg/cgroups"
@@ -140,7 +141,7 @@ func stringMaptoArray(m map[string]string) []string {
// ContainerCreateToContainerCLIOpts converts a compat input struct to cliopts so it can be converted to
// a specgen spec.
-func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroupsManager string) (*ContainerCLIOpts, []string, error) {
+func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*ContainerCLIOpts, []string, error) {
var (
capAdd []string
cappDrop []string
@@ -248,7 +249,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
}
// netMode
- nsmode, _, err := specgen.ParseNetworkNamespace(string(cc.HostConfig.NetworkMode))
+ nsmode, networks, err := specgen.ParseNetworkNamespace(string(cc.HostConfig.NetworkMode), true)
if err != nil {
return nil, nil, err
}
@@ -321,7 +322,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
netInfo.Aliases = aliases
netInfo.CNINetworks = cniNetworks
case len(cc.HostConfig.NetworkMode) > 0:
- netInfo.CNINetworks = []string{string(cc.HostConfig.NetworkMode)}
+ netInfo.CNINetworks = networks
}
parsedTmp := make([]string, 0, len(cc.HostConfig.Tmpfs))
@@ -507,7 +508,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
cliOpts.Restart = policy
}
- if cc.HostConfig.MemorySwappiness != nil && (!rootless.IsRootless() || rootless.IsRootless() && cgroupsv2 && cgroupsManager == "systemd") {
+ if cc.HostConfig.MemorySwappiness != nil && (!rootless.IsRootless() || rootless.IsRootless() && cgroupsv2 && rtc.Engine.CgroupManager == "systemd") {
cliOpts.MemorySwappiness = *cc.HostConfig.MemorySwappiness
} else {
cliOpts.MemorySwappiness = -1
diff --git a/cmd/podman/common/netflags.go b/cmd/podman/common/netflags.go
index 4d0a554a6..78cfe2f13 100644
--- a/cmd/podman/common/netflags.go
+++ b/cmd/podman/common/netflags.go
@@ -85,7 +85,10 @@ func DefineNetFlags(cmd *cobra.Command) {
)
}
-func NetFlagsToNetOptions(cmd *cobra.Command) (*entities.NetOptions, error) {
+// NetFlagsToNetOptions parses the network flags for the given cmd.
+// The netnsFromConfig bool indicates whether the --network flag
+// should always be parsed, regardless of whether it was set on the CLI.
+func NetFlagsToNetOptions(cmd *cobra.Command, netnsFromConfig bool) (*entities.NetOptions, error) {
var (
err error
)
@@ -167,7 +170,7 @@ func NetFlagsToNetOptions(cmd *cobra.Command) (*entities.NetOptions, error) {
return nil, err
}
if len(inputPorts) > 0 {
- opts.PublishPorts, err = createPortBindings(inputPorts)
+ opts.PublishPorts, err = CreatePortBindings(inputPorts)
if err != nil {
return nil, err
}
@@ -193,7 +196,9 @@ func NetFlagsToNetOptions(cmd *cobra.Command) (*entities.NetOptions, error) {
return nil, err
}
- if cmd.Flags().Changed("network") {
+ // parse the --network value only when the flag is set or we need to use
+ // the netns config value, e.g. when --pod is not used
+ if netnsFromConfig || cmd.Flag("network").Changed {
network, err := cmd.Flags().GetString("network")
if err != nil {
return nil, err
@@ -201,7 +206,7 @@ func NetFlagsToNetOptions(cmd *cobra.Command) (*entities.NetOptions, error) {
parts := strings.SplitN(network, ":", 2)
- ns, cniNets, err := specgen.ParseNetworkNamespace(network)
+ ns, cniNets, err := specgen.ParseNetworkNamespace(network, containerConfig.Containers.RootlessNetworking == "cni")
if err != nil {
return nil, err
}
diff --git a/cmd/podman/common/util.go b/cmd/podman/common/util.go
index afee55914..6a0af4dff 100644
--- a/cmd/podman/common/util.go
+++ b/cmd/podman/common/util.go
@@ -89,8 +89,8 @@ func createExpose(expose []string) (map[uint16]string, error) {
return toReturn, nil
}
-// createPortBindings iterates ports mappings into SpecGen format.
-func createPortBindings(ports []string) ([]specgen.PortMapping, error) {
+// CreatePortBindings iterates ports mappings into SpecGen format.
+func CreatePortBindings(ports []string) ([]specgen.PortMapping, error) {
// --publish is formatted as follows:
// [[hostip:]hostport[-endPort]:]containerport[-endPort][/protocol]
toReturn := make([]specgen.PortMapping, 0, len(ports))
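The comment above quotes the full --publish grammar. As a sketch of the mapping it produces, the following handles only the plain hostPort:containerPort[/protocol] subset; it is not Podman's parser, and the portMapping struct is a stand-in for specgen.PortMapping:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// portMapping is a simplified stand-in for specgen.PortMapping.
type portMapping struct {
	HostPort      uint16
	ContainerPort uint16
	Protocol      string
}

// parsePublish handles only "hostPort:containerPort[/protocol]"; host IPs
// and port ranges from the full grammar are deliberately left out.
func parsePublish(spec string) (portMapping, error) {
	proto := "tcp"
	if i := strings.IndexByte(spec, '/'); i >= 0 {
		proto = spec[i+1:]
		spec = spec[:i]
	}
	parts := strings.Split(spec, ":")
	if len(parts) != 2 {
		return portMapping{}, fmt.Errorf("expected hostPort:containerPort, got %q", spec)
	}
	host, err := strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return portMapping{}, fmt.Errorf("invalid host port: %w", err)
	}
	ctr, err := strconv.ParseUint(parts[1], 10, 16)
	if err != nil {
		return portMapping{}, fmt.Errorf("invalid container port: %w", err)
	}
	return portMapping{HostPort: uint16(host), ContainerPort: uint16(ctr), Protocol: proto}, nil
}

func main() {
	m, err := parsePublish("8080:80/udp")
	fmt.Println(m, err) // {8080 80 udp} <nil>
}
```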
diff --git a/cmd/podman/completion/completion.go b/cmd/podman/completion/completion.go
index 472068130..da74eefdd 100644
--- a/cmd/podman/completion/completion.go
+++ b/cmd/podman/completion/completion.go
@@ -8,7 +8,6 @@ import (
commonComp "github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -39,7 +38,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: completionCmd,
})
flags := completionCmd.Flags()
diff --git a/cmd/podman/containers/attach.go b/cmd/podman/containers/attach.go
index dae1126a9..acfa0357b 100644
--- a/cmd/podman/containers/attach.go
+++ b/cmd/podman/containers/attach.go
@@ -55,14 +55,12 @@ func attachFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: attachCommand,
})
attachFlags(attachCommand)
validate.AddLatestFlag(attachCommand, &attachOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerAttachCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/checkpoint.go b/cmd/podman/containers/checkpoint.go
index 25d0eb510..4fa72d520 100644
--- a/cmd/podman/containers/checkpoint.go
+++ b/cmd/podman/containers/checkpoint.go
@@ -3,6 +3,7 @@ package containers
import (
"context"
"fmt"
+ "strings"
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/common"
@@ -11,6 +12,7 @@ import (
"github.com/containers/podman/v3/cmd/podman/validate"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/rootless"
+ "github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -36,13 +38,10 @@ var (
}
)
-var (
- checkpointOptions entities.CheckpointOptions
-)
+var checkpointOptions entities.CheckpointOptions
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: checkpointCommand,
Parent: containerCmd,
})
@@ -61,11 +60,32 @@ func init() {
flags.BoolVarP(&checkpointOptions.PreCheckPoint, "pre-checkpoint", "P", false, "Dump container's memory information only, leave the container running")
flags.BoolVar(&checkpointOptions.WithPrevious, "with-previous", false, "Checkpoint container with pre-checkpoint images")
+ flags.StringP("compress", "c", "zstd", "Select compression algorithm (gzip, none, zstd) for checkpoint archive.")
+ _ = checkpointCommand.RegisterFlagCompletionFunc("compress", common.AutocompleteCheckpointCompressType)
+
validate.AddLatestFlag(checkpointCommand, &checkpointOptions.Latest)
}
func checkpoint(cmd *cobra.Command, args []string) error {
var errs utils.OutputErrors
+ if cmd.Flags().Changed("compress") {
+ if checkpointOptions.Export == "" {
+ return errors.Errorf("--compress can only be used with --export")
+ }
+ compress, _ := cmd.Flags().GetString("compress")
+ switch strings.ToLower(compress) {
+ case "none":
+ checkpointOptions.Compression = archive.Uncompressed
+ case "gzip":
+ checkpointOptions.Compression = archive.Gzip
+ case "zstd":
+ checkpointOptions.Compression = archive.Zstd
+ default:
+ return errors.Errorf("Selected compression algorithm (%q) not supported. Please select one from: gzip, none, zstd", compress)
+ }
+ } else {
+ checkpointOptions.Compression = archive.Zstd
+ }
if rootless.IsRootless() {
return errors.New("checkpointing a container requires root")
}
diff --git a/cmd/podman/containers/cleanup.go b/cmd/podman/containers/cleanup.go
index 1f87725e9..98706c575 100644
--- a/cmd/podman/containers/cleanup.go
+++ b/cmd/podman/containers/cleanup.go
@@ -21,10 +21,11 @@ var (
Cleans up mount points and network stacks on one or more containers from the host. The container name or ID can be used. This command is used internally when running containers, but can also be used if container cleanup has failed when a container exits.
`
cleanupCommand = &cobra.Command{
- Use: "cleanup [options] CONTAINER [CONTAINER...]",
- Short: "Cleanup network and mountpoints of one or more containers",
- Long: cleanupDescription,
- RunE: cleanup,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "cleanup [options] CONTAINER [CONTAINER...]",
+ Short: "Cleanup network and mountpoints of one or more containers",
+ Long: cleanupDescription,
+ RunE: cleanup,
Args: func(cmd *cobra.Command, args []string) error {
return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
},
@@ -41,7 +42,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Parent: containerCmd,
Command: cleanupCommand,
})
diff --git a/cmd/podman/containers/commit.go b/cmd/podman/containers/commit.go
index 168ded356..f74f12851 100644
--- a/cmd/podman/containers/commit.go
+++ b/cmd/podman/containers/commit.go
@@ -82,13 +82,11 @@ func commitFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: commitCommand,
})
commitFlags(commitCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerCommitCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/container.go b/cmd/podman/containers/container.go
index c78668611..14e030bf4 100644
--- a/cmd/podman/containers/container.go
+++ b/cmd/podman/containers/container.go
@@ -3,7 +3,6 @@ package containers
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/util"
"github.com/spf13/cobra"
)
@@ -26,7 +25,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerCmd,
})
}
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index 27aacc6e5..2c7d72b20 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -62,13 +62,11 @@ func cpFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: cpCommand,
})
cpFlags(cpCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerCpCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index f06869c4e..68a17abd0 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -39,7 +39,7 @@ var (
}
containerCreateCommand = &cobra.Command{
- Args: cobra.MinimumNArgs(1),
+ Args: createCommand.Args,
Use: createCommand.Use,
Short: createCommand.Short,
Long: createCommand.Long,
@@ -72,13 +72,11 @@ func createFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: createCommand,
})
createFlags(createCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerCreateCommand,
Parent: containerCmd,
})
@@ -89,7 +87,7 @@ func create(cmd *cobra.Command, args []string) error {
var (
err error
)
- cliVals.Net, err = common.NetFlagsToNetOptions(cmd)
+ cliVals.Net, err = common.NetFlagsToNetOptions(cmd, cliVals.Pod == "")
if err != nil {
return err
}
diff --git a/cmd/podman/containers/diff.go b/cmd/podman/containers/diff.go
index 799d01127..0eee85825 100644
--- a/cmd/podman/containers/diff.go
+++ b/cmd/podman/containers/diff.go
@@ -27,7 +27,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: diffCmd,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/exec.go b/cmd/podman/containers/exec.go
index 15ac2b164..5d0da1514 100644
--- a/cmd/podman/containers/exec.go
+++ b/cmd/podman/containers/exec.go
@@ -21,24 +21,22 @@ var (
execDescription = `Execute the specified command inside a running container.
`
execCommand = &cobra.Command{
- Use: "exec [options] CONTAINER [COMMAND [ARG...]]",
- Short: "Run a process in a running container",
- Long: execDescription,
- RunE: exec,
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteExecCommand,
+ Use: "exec [options] CONTAINER [COMMAND [ARG...]]",
+ Short: "Run a process in a running container",
+ Long: execDescription,
+ RunE: exec,
+ ValidArgsFunction: common.AutocompleteExecCommand,
Example: `podman exec -it ctrID ls
podman exec -it -w /tmp myCtr pwd
podman exec --user root ctrID ls`,
}
containerExecCommand = &cobra.Command{
- Use: execCommand.Use,
- Short: execCommand.Short,
- Long: execCommand.Long,
- RunE: execCommand.RunE,
- DisableFlagsInUseLine: true,
- ValidArgsFunction: execCommand.ValidArgsFunction,
+ Use: execCommand.Use,
+ Short: execCommand.Short,
+ Long: execCommand.Long,
+ RunE: execCommand.RunE,
+ ValidArgsFunction: execCommand.ValidArgsFunction,
Example: `podman container exec -it ctrID ls
podman container exec -it -w /tmp myCtr pwd
podman container exec --user root ctrID ls`,
@@ -92,14 +90,12 @@ func execFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: execCommand,
})
execFlags(execCommand)
validate.AddLatestFlag(execCommand, &execOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerExecCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/exists.go b/cmd/podman/containers/exists.go
index c21b30a4b..cf7ac4c5d 100644
--- a/cmd/podman/containers/exists.go
+++ b/cmd/podman/containers/exists.go
@@ -18,16 +18,14 @@ var (
Long: containerExistsDescription,
Example: `podman container exists --external containerID
podman container exists myctr || podman run --name myctr [etc...]`,
- RunE: exists,
- Args: cobra.ExactArgs(1),
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteContainers,
+ RunE: exists,
+ Args: cobra.ExactArgs(1),
+ ValidArgsFunction: common.AutocompleteContainers,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: existsCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/export.go b/cmd/podman/containers/export.go
index 3ea55415c..e031578b8 100644
--- a/cmd/podman/containers/export.go
+++ b/cmd/podman/containers/export.go
@@ -55,13 +55,11 @@ func exportFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: exportCommand,
})
exportFlags(exportCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerExportCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/init.go b/cmd/podman/containers/init.go
index e874b9cb0..e68217478 100644
--- a/cmd/podman/containers/init.go
+++ b/cmd/podman/containers/init.go
@@ -52,7 +52,6 @@ func initFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: initCommand,
})
flags := initCommand.Flags()
@@ -60,7 +59,6 @@ func init() {
validate.AddLatestFlag(initCommand, &initOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Parent: containerCmd,
Command: containerInitCommand,
})
diff --git a/cmd/podman/containers/inspect.go b/cmd/podman/containers/inspect.go
index eb29b7285..5a8ccc628 100644
--- a/cmd/podman/containers/inspect.go
+++ b/cmd/podman/containers/inspect.go
@@ -26,7 +26,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/kill.go b/cmd/podman/containers/kill.go
index aa36b51fd..449484449 100644
--- a/cmd/podman/containers/kill.go
+++ b/cmd/podman/containers/kill.go
@@ -67,14 +67,12 @@ func killFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: killCommand,
})
killFlags(killCommand)
validate.AddLatestFlag(killCommand, &killOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerKillCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/list.go b/cmd/podman/containers/list.go
index 4a903f5ea..203f9560e 100644
--- a/cmd/podman/containers/list.go
+++ b/cmd/podman/containers/list.go
@@ -4,7 +4,6 @@ import (
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -26,7 +25,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: listCmd,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/logs.go b/cmd/podman/containers/logs.go
index 6275106cf..0d745291e 100644
--- a/cmd/podman/containers/logs.go
+++ b/cmd/podman/containers/logs.go
@@ -77,7 +77,6 @@ func init() {
// logs
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: logsCommand,
})
logsFlags(logsCommand)
@@ -85,7 +84,6 @@ func init() {
// container logs
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerLogsCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/mount.go b/cmd/podman/containers/mount.go
index fd5a279d2..8a7c542ba 100644
--- a/cmd/podman/containers/mount.go
+++ b/cmd/podman/containers/mount.go
@@ -25,6 +25,11 @@ var (
`
mountCommand = &cobra.Command{
+ Annotations: map[string]string{
+ registry.UnshareNSRequired: "",
+ registry.ParentNSRequired: "",
+ registry.EngineMode: registry.ABIMode,
+ },
Use: "mount [options] [CONTAINER...]",
Short: "Mount a working container's root filesystem",
Long: mountDescription,
@@ -32,20 +37,16 @@ var (
Args: func(cmd *cobra.Command, args []string) error {
return validate.CheckAllLatestAndCIDFile(cmd, args, true, false)
},
- Annotations: map[string]string{
- registry.UnshareNSRequired: "",
- registry.ParentNSRequired: "",
- },
ValidArgsFunction: common.AutocompleteContainers,
}
containerMountCommand = &cobra.Command{
+ Annotations: mountCommand.Annotations,
Use: mountCommand.Use,
Short: mountCommand.Short,
Long: mountCommand.Long,
RunE: mountCommand.RunE,
Args: mountCommand.Args,
- Annotations: mountCommand.Annotations,
ValidArgsFunction: mountCommand.ValidArgsFunction,
}
)
@@ -68,14 +69,12 @@ func mountFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: mountCommand,
})
mountFlags(mountCommand)
validate.AddLatestFlag(mountCommand, &mountOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: containerMountCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/pause.go b/cmd/podman/containers/pause.go
index a1ac00bfb..8e1b69d71 100644
--- a/cmd/podman/containers/pause.go
+++ b/cmd/podman/containers/pause.go
@@ -48,14 +48,12 @@ func pauseFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pauseCommand,
})
flags := pauseCommand.Flags()
pauseFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerPauseCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/port.go b/cmd/podman/containers/port.go
index 7c9565c26..db66fc9a0 100644
--- a/cmd/podman/containers/port.go
+++ b/cmd/podman/containers/port.go
@@ -56,14 +56,12 @@ func portFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: portCommand,
})
portFlags(portCommand.Flags())
validate.AddLatestFlag(portCommand, &portOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerPortCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/prune.go b/cmd/podman/containers/prune.go
index 94da029b9..e55bd8a53 100644
--- a/cmd/podman/containers/prune.go
+++ b/cmd/podman/containers/prune.go
@@ -35,7 +35,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pruneCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index 3c0162676..352f67f4e 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -60,14 +60,12 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: psCommand,
})
listFlagSet(psCommand)
validate.AddLatestFlag(psCommand, &listOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: psContainerCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/rename.go b/cmd/podman/containers/rename.go
index 0f4a1dde1..1371e50e0 100644
--- a/cmd/podman/containers/rename.go
+++ b/cmd/podman/containers/rename.go
@@ -33,12 +33,10 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: renameCommand,
})
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerRenameCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/restart.go b/cmd/podman/containers/restart.go
index 80ffff700..41b33258f 100644
--- a/cmd/podman/containers/restart.go
+++ b/cmd/podman/containers/restart.go
@@ -39,6 +39,7 @@ var (
Short: restartCommand.Short,
Long: restartCommand.Long,
RunE: restartCommand.RunE,
+ Args: restartCommand.Args,
ValidArgsFunction: restartCommand.ValidArgsFunction,
Example: `podman container restart ctrID
podman container restart --latest
@@ -66,14 +67,12 @@ func restartFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: restartCommand,
})
restartFlags(restartCommand)
validate.AddLatestFlag(restartCommand, &restartOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerRestartCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/restore.go b/cmd/podman/containers/restore.go
index ad530e308..b908ea493 100644
--- a/cmd/podman/containers/restore.go
+++ b/cmd/podman/containers/restore.go
@@ -36,13 +36,10 @@ var (
}
)
-var (
- restoreOptions entities.RestoreOptions
-)
+var restoreOptions entities.RestoreOptions
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: restoreCommand,
Parent: containerCmd,
})
@@ -67,10 +64,17 @@ func init() {
flags.BoolVar(&restoreOptions.IgnoreStaticIP, "ignore-static-ip", false, "Ignore IP address set via --static-ip")
flags.BoolVar(&restoreOptions.IgnoreStaticMAC, "ignore-static-mac", false, "Ignore MAC address set via --mac-address")
flags.BoolVar(&restoreOptions.IgnoreVolumes, "ignore-volumes", false, "Do not export volumes associated with container")
+
+ flags.StringSliceP(
+ "publish", "p", []string{},
+ "Publish a container's port, or a range of ports, to the host (default [])",
+ )
+ _ = restoreCommand.RegisterFlagCompletionFunc("publish", completion.AutocompleteNone)
+
validate.AddLatestFlag(restoreCommand, &restoreOptions.Latest)
}
-func restore(_ *cobra.Command, args []string) error {
+func restore(cmd *cobra.Command, args []string) error {
var errs utils.OutputErrors
if rootless.IsRootless() {
return errors.New("restoring a container requires root")
@@ -91,6 +95,17 @@ func restore(_ *cobra.Command, args []string) error {
return errors.Errorf("--tcp-established cannot be used with --name")
}
+ inputPorts, err := cmd.Flags().GetStringSlice("publish")
+ if err != nil {
+ return err
+ }
+ if len(inputPorts) > 0 {
+ restoreOptions.PublishPorts, err = common.CreatePortBindings(inputPorts)
+ if err != nil {
+ return err
+ }
+ }
+
argLen := len(args)
if restoreOptions.Import != "" {
if restoreOptions.All || restoreOptions.Latest {
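restore gains a --publish/-p flag whose values are converted into port bindings via common.CreatePortBindings, so a container restored from an exported checkpoint can be re-published on different host ports, e.g. `podman container restore --import ctr.tar.gz --publish 8080:80`. A toy illustration of the HOST_PORT:CONTAINER_PORT form only (splitPublish is hypothetical and far simpler than the real parser behind CreatePortBindings):

```go
package main

import (
	"fmt"
	"strings"
)

// splitPublish splits a single "HOST_PORT:CONTAINER_PORT" spec.
func splitPublish(spec string) (host, ctr string, err error) {
	parts := strings.SplitN(spec, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected HOST_PORT:CONTAINER_PORT, got %q", spec)
	}
	return parts[0], parts[1], nil
}

func main() {
	h, c, _ := splitPublish("8080:80")
	fmt.Println(h, c) // 8080 80
}
```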
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index 5822fc4b7..abab71a79 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -38,13 +38,11 @@ var (
}
containerRmCommand = &cobra.Command{
- Use: rmCommand.Use,
- Short: rmCommand.Short,
- Long: rmCommand.Long,
- RunE: rmCommand.RunE,
- Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
- },
+ Use: rmCommand.Use,
+ Short: rmCommand.Short,
+ Long: rmCommand.Long,
+ RunE: rmCommand.RunE,
+ Args: rmCommand.Args,
ValidArgsFunction: rmCommand.ValidArgsFunction,
Example: `podman container rm imageID
podman container rm mywebserver myflaskserver 860a4b23
@@ -79,14 +77,12 @@ func rmFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCommand,
})
rmFlags(rmCommand)
validate.AddLatestFlag(rmCommand, &rmOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerRmCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 8e27977c0..579af4eb1 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -77,6 +77,11 @@ func runFlags(cmd *cobra.Command) {
flags.StringVar(&runOpts.DetachKeys, detachKeysFlagName, containerConfig.DetachKeys(), "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-cf`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
_ = cmd.RegisterFlagCompletionFunc(detachKeysFlagName, common.AutocompleteDetachKeys)
+ gpuFlagName := "gpus"
+ flags.String(gpuFlagName, "", "This is a Docker specific option and is a NOOP")
+ _ = cmd.RegisterFlagCompletionFunc(gpuFlagName, completion.AutocompleteNone)
+ _ = flags.MarkHidden("gpus")
+
if registry.IsRemote() {
_ = flags.MarkHidden("preserve-fds")
_ = flags.MarkHidden("conmon-pidfile")
@@ -86,14 +91,12 @@ func runFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: runCommand,
})
runFlags(runCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerRunCommand,
Parent: containerCmd,
})
@@ -103,7 +106,7 @@ func init() {
func run(cmd *cobra.Command, args []string) error {
var err error
- cliVals.Net, err = common.NetFlagsToNetOptions(cmd)
+ cliVals.Net, err = common.NetFlagsToNetOptions(cmd, cliVals.Pod == "")
if err != nil {
return err
}
@@ -204,5 +207,8 @@ func run(cmd *cobra.Command, args []string) error {
logrus.Errorf("%s", errorhandling.JoinErrors(rmErrors))
}
}
+ if cmd.Flag("gpus").Changed {
+ logrus.Info("--gpus is a Docker specific option and is a NOOP")
+ }
return nil
}
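`podman run` now accepts a hidden --gpus flag purely for Docker CLI compatibility: the value is ignored and an informational log line is emitted when the flag was set. A minimal sketch of that hidden no-op flag pattern, assuming a stand-alone cobra command (all names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Only report the flag if the user actually set it.
			if cmd.Flag("gpus").Changed {
				fmt.Println("--gpus is accepted for Docker compatibility and ignored")
			}
			return nil
		},
	}
	cmd.Flags().String("gpus", "", "Docker-compatibility option (no effect)")
	_ = cmd.Flags().MarkHidden("gpus") // keep it out of --help output
	_ = cmd.Execute()
}
```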
diff --git a/cmd/podman/containers/runlabel.go b/cmd/podman/containers/runlabel.go
index 75c2557f0..85f3785be 100644
--- a/cmd/podman/containers/runlabel.go
+++ b/cmd/podman/containers/runlabel.go
@@ -25,6 +25,7 @@ var (
runlabelOptions = runlabelOptionsWrapper{}
runlabelDescription = "Executes a command as described by a container image label."
runlabelCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "runlabel [options] LABEL IMAGE [ARG...]",
Short: "Execute the command described by an image label",
Long: runlabelDescription,
@@ -39,7 +40,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: runlabelCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go
index dcd1eca82..1163b9093 100644
--- a/cmd/podman/containers/start.go
+++ b/cmd/podman/containers/start.go
@@ -70,14 +70,12 @@ func startFlags(cmd *cobra.Command) {
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: startCommand,
})
startFlags(startCommand)
validate.AddLatestFlag(startCommand, &startOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerStartCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go
index 7160f1ba8..568e410d2 100644
--- a/cmd/podman/containers/stats.go
+++ b/cmd/podman/containers/stats.go
@@ -79,14 +79,12 @@ func statFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: statsCommand,
})
statFlags(statsCommand)
validate.AddLatestFlag(statsCommand, &statsOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerStatsCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/stop.go b/cmd/podman/containers/stop.go
index 62ce9b036..13da59a8c 100644
--- a/cmd/podman/containers/stop.go
+++ b/cmd/podman/containers/stop.go
@@ -78,14 +78,12 @@ func stopFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: stopCommand,
})
stopFlags(stopCommand)
validate.AddLatestFlag(stopCommand, &stopOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerStopCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/top.go b/cmd/podman/containers/top.go
index 03cee5d56..3fde2e954 100644
--- a/cmd/podman/containers/top.go
+++ b/cmd/podman/containers/top.go
@@ -58,7 +58,6 @@ func topFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: topCommand,
})
topFlags(topCommand.Flags())
@@ -71,7 +70,6 @@ func init() {
}
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerTopCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/unmount.go b/cmd/podman/containers/unmount.go
index 98bbce581..96470da78 100644
--- a/cmd/podman/containers/unmount.go
+++ b/cmd/podman/containers/unmount.go
@@ -20,11 +20,12 @@ var (
An unmount can be forced with the --force flag.
`
unmountCommand = &cobra.Command{
- Use: "unmount [options] CONTAINER [CONTAINER...]",
- Aliases: []string{"umount"},
- Short: "Unmounts working container's root filesystem",
- Long: description,
- RunE: unmount,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "unmount [options] CONTAINER [CONTAINER...]",
+ Aliases: []string{"umount"},
+ Short: "Unmounts working container's root filesystem",
+ Long: description,
+ RunE: unmount,
Args: func(cmd *cobra.Command, args []string) error {
return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
},
@@ -35,11 +36,12 @@ var (
}
containerUnmountCommand = &cobra.Command{
- Use: unmountCommand.Use,
- Short: unmountCommand.Short,
- Aliases: unmountCommand.Aliases,
- Long: unmountCommand.Long,
- RunE: unmountCommand.RunE,
+ Annotations: unmountCommand.Annotations,
+ Use: unmountCommand.Use,
+ Short: unmountCommand.Short,
+ Aliases: unmountCommand.Aliases,
+ Long: unmountCommand.Long,
+ RunE: unmountCommand.RunE,
Args: func(cmd *cobra.Command, args []string) error {
return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
},
@@ -61,14 +63,12 @@ func unmountFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: unmountCommand,
})
unmountFlags(unmountCommand.Flags())
validate.AddLatestFlag(unmountCommand, &unmountOpts.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: containerUnmountCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/unpause.go b/cmd/podman/containers/unpause.go
index 0077181d7..e152f70e1 100644
--- a/cmd/podman/containers/unpause.go
+++ b/cmd/podman/containers/unpause.go
@@ -45,14 +45,12 @@ func unpauseFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: unpauseCommand,
})
flags := unpauseCommand.Flags()
unpauseFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerUnpauseCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/wait.go b/cmd/podman/containers/wait.go
index c01bc08b1..e720421a6 100644
--- a/cmd/podman/containers/wait.go
+++ b/cmd/podman/containers/wait.go
@@ -60,14 +60,12 @@ func waitFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: waitCommand,
})
waitFlags(waitCommand)
validate.AddLatestFlag(waitCommand, &waitOptions.Latest)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerWaitCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/diff.go b/cmd/podman/diff.go
index ae7d6c4bc..e2a3cd727 100644
--- a/cmd/podman/diff.go
+++ b/cmd/podman/diff.go
@@ -34,7 +34,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: diffCmd,
})
flags := diffCmd.Flags()
diff --git a/cmd/podman/generate/generate.go b/cmd/podman/generate/generate.go
index a6d39bbbe..6b48a342e 100644
--- a/cmd/podman/generate/generate.go
+++ b/cmd/podman/generate/generate.go
@@ -3,7 +3,6 @@ package pods
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/util"
"github.com/spf13/cobra"
)
@@ -21,7 +20,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: generateCmd,
})
}
diff --git a/cmd/podman/generate/kube.go b/cmd/podman/generate/kube.go
index 9767b0e06..b4c9f9146 100644
--- a/cmd/podman/generate/kube.go
+++ b/cmd/podman/generate/kube.go
@@ -38,7 +38,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: kubeCmd,
Parent: generateCmd,
})
diff --git a/cmd/podman/generate/systemd.go b/cmd/podman/generate/systemd.go
index 8a8f5016a..5461f1f6a 100644
--- a/cmd/podman/generate/systemd.go
+++ b/cmd/podman/generate/systemd.go
@@ -40,7 +40,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: systemdCmd,
Parent: generateCmd,
})
diff --git a/cmd/podman/healthcheck/healthcheck.go b/cmd/podman/healthcheck/healthcheck.go
index 4a986e171..b42ae2182 100644
--- a/cmd/podman/healthcheck/healthcheck.go
+++ b/cmd/podman/healthcheck/healthcheck.go
@@ -3,12 +3,10 @@ package healthcheck
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
- // Command: healthcheck
healthCmd = &cobra.Command{
Use: "healthcheck",
Short: "Manage health checks on containers",
@@ -19,7 +17,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: healthCmd,
})
}
diff --git a/cmd/podman/healthcheck/run.go b/cmd/podman/healthcheck/run.go
index 50a51d172..fe8dc2573 100644
--- a/cmd/podman/healthcheck/run.go
+++ b/cmd/podman/healthcheck/run.go
@@ -11,23 +11,20 @@ import (
)
var (
- healthcheckRunDescription = "run the health check of a container"
- healthcheckrunCommand = &cobra.Command{
- Use: "run CONTAINER",
- Short: "run the health check of a container",
- Long: healthcheckRunDescription,
- Example: `podman healthcheck run mywebapp`,
- RunE: run,
- Args: cobra.ExactArgs(1),
- ValidArgsFunction: common.AutocompleteContainersRunning,
- DisableFlagsInUseLine: true,
+ runCmd = &cobra.Command{
+ Use: "run CONTAINER",
+ Short: "run the health check of a container",
+ Long: "run the health check of a container",
+ Example: `podman healthcheck run mywebapp`,
+ RunE: run,
+ Args: cobra.ExactArgs(1),
+ ValidArgsFunction: common.AutocompleteContainersRunning,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: healthcheckrunCommand,
+ Command: runCmd,
Parent: healthCmd,
})
}
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 04fdeab0a..dfb7415a9 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -82,14 +82,11 @@ func useLayers() string {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
-
Command: buildCmd,
})
buildFlags(buildCmd)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageBuildCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/diff.go b/cmd/podman/images/diff.go
index 7f4c3e83d..2e883d7ae 100644
--- a/cmd/podman/images/diff.go
+++ b/cmd/podman/images/diff.go
@@ -27,7 +27,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: diffCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/exists.go b/cmd/podman/images/exists.go
index 332510b3d..d278eafda 100644
--- a/cmd/podman/images/exists.go
+++ b/cmd/podman/images/exists.go
@@ -3,7 +3,6 @@ package images
import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -17,13 +16,11 @@ var (
ValidArgsFunction: common.AutocompleteImages,
Example: `podman image exists ID
podman image exists IMAGE && podman pull IMAGE`,
- DisableFlagsInUseLine: true,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: existsCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/history.go b/cmd/podman/images/history.go
index 16be0bb19..69268c261 100644
--- a/cmd/podman/images/history.go
+++ b/cmd/podman/images/history.go
@@ -56,13 +56,11 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: historyCmd,
})
historyFlags(historyCmd)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageHistoryCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/image.go b/cmd/podman/images/image.go
index 69563959d..92d1348bf 100644
--- a/cmd/podman/images/image.go
+++ b/cmd/podman/images/image.go
@@ -3,7 +3,6 @@ package images
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageCmd,
})
}
diff --git a/cmd/podman/images/images.go b/cmd/podman/images/images.go
deleted file mode 100644
index 17fe21911..000000000
--- a/cmd/podman/images/images.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package images
-
-import (
- "strings"
-
- "github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/spf13/cobra"
-)
-
-var (
- // podman _images_ Alias for podman image _list_
- imagesCmd = &cobra.Command{
- Use: strings.Replace(listCmd.Use, "list", "images", 1),
- Args: listCmd.Args,
- Short: listCmd.Short,
- Long: listCmd.Long,
- RunE: listCmd.RunE,
- ValidArgsFunction: listCmd.ValidArgsFunction,
- Example: strings.Replace(listCmd.Example, "podman image list", "podman images", -1),
- DisableFlagsInUseLine: true,
- }
-)
-
-func init() {
- registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: imagesCmd,
- })
-
- imageListFlagSet(imagesCmd)
-}
diff --git a/cmd/podman/images/import.go b/cmd/podman/images/import.go
index 72e0b63ba..bed2d4105 100644
--- a/cmd/podman/images/import.go
+++ b/cmd/podman/images/import.go
@@ -51,13 +51,11 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: importCommand,
})
importFlags(importCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageImportCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/inspect.go b/cmd/podman/images/inspect.go
index ac3becaa6..35c173a60 100644
--- a/cmd/podman/images/inspect.go
+++ b/cmd/podman/images/inspect.go
@@ -26,7 +26,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go
index 132af858b..cc38a45c7 100644
--- a/cmd/podman/images/list.go
+++ b/cmd/podman/images/list.go
@@ -34,8 +34,7 @@ type listFlagType struct {
}
var (
- // Command: podman image _list_
- listCmd = &cobra.Command{
+ imageListCmd = &cobra.Command{
Use: "list [options] [IMAGE]",
Aliases: []string{"ls"},
Args: cobra.MaximumNArgs(1),
@@ -46,7 +45,18 @@ var (
Example: `podman image list --format json
podman image list --sort repository --format "table {{.ID}} {{.Repository}} {{.Tag}}"
podman image list --filter dangling=true`,
- DisableFlagsInUseLine: true,
+ }
+
+ imagesCmd = &cobra.Command{
+ Use: "images [options] [IMAGE]",
+ Args: imageListCmd.Args,
+ Short: imageListCmd.Short,
+ Long: imageListCmd.Long,
+ RunE: imageListCmd.RunE,
+ ValidArgsFunction: imageListCmd.ValidArgsFunction,
+ Example: `podman images --format json
+ podman images --sort repository --format "table {{.ID}} {{.Repository}} {{.Tag}}"
+ podman images --filter dangling=true`,
}
// Options to pull data
@@ -65,11 +75,15 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: listCmd,
+ Command: imageListCmd,
Parent: imageCmd,
})
- imageListFlagSet(listCmd)
+ imageListFlagSet(imageListCmd)
+
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: imagesCmd,
+ })
+ imageListFlagSet(imagesCmd)
}
func imageListFlagSet(cmd *cobra.Command) {
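`podman images` is now declared next to `podman image list` in list.go as a second cobra.Command that reuses the list command's Args, RunE and completion (the separate images.go alias file is deleted above). A small, self-contained sketch of that alias pattern, with illustrative command names:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	listCmd := &cobra.Command{
		Use:  "list",
		Args: cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("listing images", args)
			return nil
		},
	}
	// Top-level alias: same behavior, different Use/Example text.
	imagesCmd := &cobra.Command{
		Use:  "images",
		Args: listCmd.Args,
		RunE: listCmd.RunE,
	}
	root := &cobra.Command{Use: "demo"}
	imageCmd := &cobra.Command{Use: "image"}
	imageCmd.AddCommand(listCmd)
	root.AddCommand(imageCmd, imagesCmd)

	root.SetArgs([]string{"images"})
	_ = root.Execute()
}
```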
diff --git a/cmd/podman/images/load.go b/cmd/podman/images/load.go
index aac95dae8..c7b7d6b3f 100644
--- a/cmd/podman/images/load.go
+++ b/cmd/podman/images/load.go
@@ -45,12 +45,10 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: loadCommand,
})
loadFlags(loadCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageLoadCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/mount.go b/cmd/podman/images/mount.go
index a098aac63..efcff8264 100644
--- a/cmd/podman/images/mount.go
+++ b/cmd/podman/images/mount.go
@@ -24,6 +24,11 @@ var (
`
mountCommand = &cobra.Command{
+ Annotations: map[string]string{
+ registry.UnshareNSRequired: "",
+ registry.ParentNSRequired: "",
+ registry.EngineMode: registry.ABIMode,
+ },
Use: "mount [options] [IMAGE...]",
Short: "Mount an image's root filesystem",
Long: mountDescription,
@@ -33,10 +38,6 @@ var (
podman image mount imgID1 imgID2 imgID3
podman image mount
podman image mount --all`,
- Annotations: map[string]string{
- registry.UnshareNSRequired: "",
- registry.ParentNSRequired: "",
- },
}
)
@@ -56,7 +57,6 @@ func mountFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: mountCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/prune.go b/cmd/podman/images/prune.go
index db645cc2e..a082255f6 100644
--- a/cmd/podman/images/prune.go
+++ b/cmd/podman/images/prune.go
@@ -34,7 +34,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pruneCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/pull.go b/cmd/podman/images/pull.go
index bf0eec759..a831ea848 100644
--- a/cmd/podman/images/pull.go
+++ b/cmd/podman/images/pull.go
@@ -60,14 +60,12 @@ var (
func init() {
// pull
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pullCmd,
})
pullFlags(pullCmd)
// images pull
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imagesPullCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go
index 9e7b447a4..edf8e9203 100644
--- a/cmd/podman/images/push.go
+++ b/cmd/podman/images/push.go
@@ -57,14 +57,12 @@ var (
func init() {
// push
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pushCmd,
})
pushFlags(pushCmd)
// images push
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imagePushCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/rm.go b/cmd/podman/images/rm.go
index 2daa4743f..be4c0b883 100644
--- a/cmd/podman/images/rm.go
+++ b/cmd/podman/images/rm.go
@@ -25,17 +25,33 @@ var (
podman image rm c4dfb1609ee2 93fd78260bd1 c0ed59d05ff7`,
}
+ rmiCmd = &cobra.Command{
+ Use: "rmi [options] IMAGE [IMAGE...]",
+ Args: rmCmd.Args,
+ Short: rmCmd.Short,
+ Long: rmCmd.Long,
+ RunE: rmCmd.RunE,
+ ValidArgsFunction: rmCmd.ValidArgsFunction,
+ Example: `podman rmi imageID
+ podman rmi --force alpine
+ podman rmi c4dfb1609ee2 93fd78260bd1 c0ed59d05ff7`,
+ }
+
imageOpts = entities.ImageRemoveOptions{}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCmd,
Parent: imageCmd,
})
imageRemoveFlagSet(rmCmd.Flags())
+
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: rmiCmd,
+ })
+ imageRemoveFlagSet(rmiCmd.Flags())
}
func imageRemoveFlagSet(flags *pflag.FlagSet) {
diff --git a/cmd/podman/images/rmi.go b/cmd/podman/images/rmi.go
deleted file mode 100644
index edd6ae96f..000000000
--- a/cmd/podman/images/rmi.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package images
-
-import (
- "strings"
-
- "github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/spf13/cobra"
-)
-
-var (
- rmiCmd = &cobra.Command{
- Use: strings.Replace(rmCmd.Use, "rm ", "rmi ", 1),
- Args: rmCmd.Args,
- Short: rmCmd.Short,
- Long: rmCmd.Long,
- RunE: rmCmd.RunE,
- ValidArgsFunction: rmCmd.ValidArgsFunction,
- Example: strings.Replace(rmCmd.Example, "podman image rm", "podman rmi", -1),
- }
-)
-
-func init() {
- registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: rmiCmd,
- })
- imageRemoveFlagSet(rmiCmd.Flags())
-}
diff --git a/cmd/podman/images/save.go b/cmd/podman/images/save.go
index f1f7e8b2e..19dadb2ad 100644
--- a/cmd/podman/images/save.go
+++ b/cmd/podman/images/save.go
@@ -48,6 +48,7 @@ var (
podman save --format docker-dir -o ubuntu-dir ubuntu
podman save > alpine-all.tar alpine:latest`,
}
+
imageSaveCommand = &cobra.Command{
Args: saveCommand.Args,
Use: saveCommand.Use,
@@ -67,13 +68,11 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: saveCommand,
})
saveFlags(saveCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageSaveCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/search.go b/cmd/podman/images/search.go
index a3cfa983f..08bf3cf6b 100644
--- a/cmd/podman/images/search.go
+++ b/cmd/podman/images/search.go
@@ -38,7 +38,6 @@ var (
Users can limit the number of results, and filter the output based on certain conditions.`
- // Command: podman search
searchCmd = &cobra.Command{
Use: "search [options] TERM",
Short: "Search registry for image",
@@ -51,14 +50,12 @@ var (
podman search --format "table {{.Index}} {{.Name}}" registry.fedoraproject.org/fedora`,
}
- // Command: podman image search
imageSearchCmd = &cobra.Command{
Use: searchCmd.Use,
Short: searchCmd.Short,
Long: searchCmd.Long,
RunE: searchCmd.RunE,
Args: searchCmd.Args,
- Annotations: searchCmd.Annotations,
ValidArgsFunction: searchCmd.ValidArgsFunction,
Example: `podman image search --filter=is-official --limit 3 alpine
podman image search registry.fedoraproject.org/ # only works with v2 registries
@@ -67,16 +64,12 @@ var (
)
func init() {
- // search
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: searchCmd,
})
searchFlags(searchCmd)
- // images search
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageSearchCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/sign.go b/cmd/podman/images/sign.go
index fcd904bb7..96f214d0b 100644
--- a/cmd/podman/images/sign.go
+++ b/cmd/podman/images/sign.go
@@ -14,6 +14,7 @@ import (
var (
signDescription = "Create a signature file that can be used later to verify the image."
signCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "sign [options] IMAGE [IMAGE...]",
Short: "Sign an image",
Long: signDescription,
@@ -31,7 +32,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: signCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/tag.go b/cmd/podman/images/tag.go
index 695ca30ed..633368f50 100644
--- a/cmd/podman/images/tag.go
+++ b/cmd/podman/images/tag.go
@@ -10,26 +10,24 @@ import (
var (
tagDescription = "Adds one or more additional names to locally-stored image."
tagCommand = &cobra.Command{
- Use: "tag IMAGE TARGET_NAME [TARGET_NAME...]",
- Short: "Add an additional name to a local image",
- Long: tagDescription,
- RunE: tag,
- Args: cobra.MinimumNArgs(2),
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteImages,
+ Use: "tag IMAGE TARGET_NAME [TARGET_NAME...]",
+ Short: "Add an additional name to a local image",
+ Long: tagDescription,
+ RunE: tag,
+ Args: cobra.MinimumNArgs(2),
+ ValidArgsFunction: common.AutocompleteImages,
Example: `podman tag 0e3bbc2 fedora:latest
podman tag imageID:latest myNewImage:newTag
podman tag httpd myregistryhost:5000/fedora/httpd:v2`,
}
imageTagCommand = &cobra.Command{
- Args: tagCommand.Args,
- DisableFlagsInUseLine: true,
- Use: tagCommand.Use,
- Short: tagCommand.Short,
- Long: tagCommand.Long,
- RunE: tagCommand.RunE,
- ValidArgsFunction: tagCommand.ValidArgsFunction,
+ Args: tagCommand.Args,
+ Use: tagCommand.Use,
+ Short: tagCommand.Short,
+ Long: tagCommand.Long,
+ RunE: tagCommand.RunE,
+ ValidArgsFunction: tagCommand.ValidArgsFunction,
Example: `podman image tag 0e3bbc2 fedora:latest
podman image tag imageID:latest myNewImage:newTag
podman image tag httpd myregistryhost:5000/fedora/httpd:v2`,
@@ -38,11 +36,9 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: tagCommand,
})
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: imageTagCommand,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/tree.go b/cmd/podman/images/tree.go
index 0a659c914..8ace9d63c 100644
--- a/cmd/podman/images/tree.go
+++ b/cmd/podman/images/tree.go
@@ -25,7 +25,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: treeCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/trust.go b/cmd/podman/images/trust.go
index 2c51bb5f8..28eb53193 100644
--- a/cmd/podman/images/trust.go
+++ b/cmd/podman/images/trust.go
@@ -3,7 +3,6 @@ package images
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -11,16 +10,16 @@ var (
trustDescription = `Manages which registries you trust as a source of container images based on their location.
The location is determined by the transport and the registry host of the image. Using this container image docker://quay.io/podman/stable as an example, docker is the transport and quay.io is the registry host.`
trustCmd = &cobra.Command{
- Use: "trust",
- Short: "Manage container image trust policy",
- Long: trustDescription,
- RunE: validate.SubCommandExists,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "trust",
+ Short: "Manage container image trust policy",
+ Long: trustDescription,
+ RunE: validate.SubCommandExists,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: trustCmd,
Parent: imageCmd,
})
diff --git a/cmd/podman/images/trust_set.go b/cmd/podman/images/trust_set.go
index c192669a9..e01a04146 100644
--- a/cmd/podman/images/trust_set.go
+++ b/cmd/podman/images/trust_set.go
@@ -16,6 +16,7 @@ import (
var (
setTrustDescription = "Set default trust policy or add a new trust policy for a registry"
setTrustCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "set [options] REGISTRY",
Short: "Set default trust policy or a new trust policy for a registry",
Long: setTrustDescription,
@@ -32,7 +33,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: setTrustCommand,
Parent: trustCmd,
})
diff --git a/cmd/podman/images/trust_show.go b/cmd/podman/images/trust_show.go
index ed9aecdb7..5d9376068 100644
--- a/cmd/podman/images/trust_show.go
+++ b/cmd/podman/images/trust_show.go
@@ -15,6 +15,7 @@ import (
var (
showTrustDescription = "Display trust policy for the system"
showTrustCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "show [options] [REGISTRY]",
Short: "Display trust policy for the system",
Long: showTrustDescription,
@@ -31,7 +32,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: showTrustCommand,
Parent: trustCmd,
})
diff --git a/cmd/podman/images/unmount.go b/cmd/podman/images/unmount.go
index 1e890c292..f72268ea2 100644
--- a/cmd/podman/images/unmount.go
+++ b/cmd/podman/images/unmount.go
@@ -20,6 +20,7 @@ var (
An unmount can be forced with the --force flag.
`
unmountCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "unmount [options] IMAGE [IMAGE...]",
Aliases: []string{"umount"},
Short: "Unmount an image's root filesystem",
@@ -43,7 +44,6 @@ func unmountFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Parent: imageCmd,
Command: unmountCommand,
})
diff --git a/cmd/podman/images/untag.go b/cmd/podman/images/untag.go
index 4344e1847..4c1862242 100644
--- a/cmd/podman/images/untag.go
+++ b/cmd/podman/images/untag.go
@@ -8,27 +8,25 @@ import (
)
var (
- untagCommand = &cobra.Command{
- Use: "untag IMAGE [IMAGE...]",
- Short: "Remove a name from a local image",
- Long: "Removes one or more names from a locally-stored image.",
- RunE: untag,
- Args: cobra.MinimumNArgs(1),
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteImages,
+ untagCmd = &cobra.Command{
+ Use: "untag IMAGE [IMAGE...]",
+ Short: "Remove a name from a local image",
+ Long: "Removes one or more names from a locally-stored image.",
+ RunE: untag,
+ Args: cobra.MinimumNArgs(1),
+ ValidArgsFunction: common.AutocompleteImages,
Example: `podman untag 0e3bbc2
podman untag imageID:latest otherImageName:latest
podman untag httpd myregistryhost:5000/fedora/httpd:v2`,
}
- imageUntagCommand = &cobra.Command{
- Args: untagCommand.Args,
- DisableFlagsInUseLine: true,
- Use: untagCommand.Use,
- Short: untagCommand.Short,
- Long: untagCommand.Long,
- RunE: untagCommand.RunE,
- ValidArgsFunction: untagCommand.ValidArgsFunction,
+ imageUntagCmd = &cobra.Command{
+ Args: untagCmd.Args,
+ Use: untagCmd.Use,
+ Short: untagCmd.Short,
+ Long: untagCmd.Long,
+ RunE: untagCmd.RunE,
+ ValidArgsFunction: untagCmd.ValidArgsFunction,
Example: `podman image untag 0e3bbc2
podman image untag imageID:latest otherImageName:latest
podman image untag httpd myregistryhost:5000/fedora/httpd:v2`,
@@ -37,12 +35,10 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: untagCommand,
+ Command: untagCmd,
})
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
- Command: imageUntagCommand,
+ Command: imageUntagCmd,
Parent: imageCmd,
})
}
diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go
index 13417c1ab..0c8102952 100644
--- a/cmd/podman/inspect.go
+++ b/cmd/podman/inspect.go
@@ -36,7 +36,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
})
inspectOpts = inspect.AddInspectFlagSet(inspectCmd)
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index cd3bc9756..2101e32e2 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/registries"
"github.com/spf13/cobra"
)
@@ -39,7 +38,6 @@ func init() {
// store credentials locally while the remote client will pass them
// over the wire to the endpoint.
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: loginCommand,
})
flags := loginCommand.Flags()
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 286737b12..092ad2610 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/registries"
"github.com/spf13/cobra"
)
@@ -33,7 +32,6 @@ func init() {
// store credentials locally while the remote client will pass them
// over the wire to the endpoint.
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: logoutCommand,
})
flags := logoutCommand.Flags()
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 02dfc80aa..f4133dbde 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -5,7 +5,6 @@ package machine
import (
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/pkg/errors"
@@ -25,13 +24,12 @@ var (
)
var (
- initOpts = machine.InitOptions{}
- defaultMachineName string = "podman-machine-default"
+ initOpts = machine.InitOptions{}
+ defaultMachineName = "podman-machine-default"
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: initCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/machine/list.go b/cmd/podman/machine/list.go
index af4e2c807..77b47161a 100644
--- a/cmd/podman/machine/list.go
+++ b/cmd/podman/machine/list.go
@@ -15,7 +15,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/parse"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/docker/go-units"
@@ -25,15 +24,15 @@ import (
var (
lsCmd = &cobra.Command{
- Use: "list [options]",
- Aliases: []string{"ls"},
- Short: "List machines",
- Long: "List managed virtual machines.",
- RunE: list,
- Args: validate.NoArgs,
+ Use: "list [options]",
+ Aliases: []string{"ls"},
+ Short: "List machines",
+ Long: "List managed virtual machines.",
+ RunE: list,
+ Args: validate.NoArgs,
+ ValidArgsFunction: completion.AutocompleteNone,
Example: `podman machine list,
podman machine ls`,
- ValidArgsFunction: completion.AutocompleteNone,
}
listFlag = listFlagType{}
)
@@ -52,7 +51,6 @@ type machineReporter struct {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: lsCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/machine/machine.go b/cmd/podman/machine/machine.go
index d8cdf5568..b059afc38 100644
--- a/cmd/podman/machine/machine.go
+++ b/cmd/podman/machine/machine.go
@@ -7,7 +7,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/spf13/cobra"
@@ -30,7 +29,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: machineCmd,
})
}
diff --git a/cmd/podman/machine/machine_unsupported.go b/cmd/podman/machine/machine_unsupported.go
index 3bb44b51f..f8392694a 100644
--- a/cmd/podman/machine/machine_unsupported.go
+++ b/cmd/podman/machine/machine_unsupported.go
@@ -2,4 +2,5 @@
package machine
+// init does not register the _podman machine_ command on unsupported platforms
func init() {}
diff --git a/cmd/podman/machine/rm.go b/cmd/podman/machine/rm.go
index 0be2ba40c..02e3dfeb8 100644
--- a/cmd/podman/machine/rm.go
+++ b/cmd/podman/machine/rm.go
@@ -9,7 +9,6 @@ import (
"strings"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/spf13/cobra"
@@ -33,7 +32,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/machine/ssh.go b/cmd/podman/machine/ssh.go
index ecc6d3b82..b52a48faf 100644
--- a/cmd/podman/machine/ssh.go
+++ b/cmd/podman/machine/ssh.go
@@ -4,7 +4,6 @@ package machine
import (
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/pkg/errors"
@@ -30,7 +29,6 @@ var (
func init() {
sshCmd.Flags().SetInterspersed(false)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: sshCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/machine/start.go b/cmd/podman/machine/start.go
index 4334cfc0f..f8f0eed09 100644
--- a/cmd/podman/machine/start.go
+++ b/cmd/podman/machine/start.go
@@ -4,7 +4,6 @@ package machine
import (
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/pkg/errors"
@@ -25,7 +24,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: startCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/machine/stop.go b/cmd/podman/machine/stop.go
index 4307d3eeb..2d5aa7b95 100644
--- a/cmd/podman/machine/stop.go
+++ b/cmd/podman/machine/stop.go
@@ -4,7 +4,6 @@ package machine
import (
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/machine/qemu"
"github.com/spf13/cobra"
@@ -24,7 +23,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: stopCmd,
Parent: machineCmd,
})
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 5219da26d..b7f5f1720 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -3,6 +3,7 @@ package main
import (
"fmt"
"os"
+ "strings"
_ "github.com/containers/podman/v3/cmd/podman/completion"
_ "github.com/containers/podman/v3/cmd/podman/containers"
@@ -42,38 +43,41 @@ func main() {
func parseCommands() *cobra.Command {
cfg := registry.PodmanConfig()
for _, c := range registry.Commands {
- for _, m := range c.Mode {
- if cfg.EngineMode == m {
- // Command cannot be run rootless
- _, found := c.Command.Annotations[registry.UnshareNSRequired]
- if found {
- if rootless.IsRootless() && found && os.Getuid() != 0 {
- c.Command.RunE = func(cmd *cobra.Command, args []string) error {
- return fmt.Errorf("cannot run command %q in rootless mode, must execute `podman unshare` first", cmd.CommandPath())
- }
- }
- } else {
- _, found = c.Command.Annotations[registry.ParentNSRequired]
- if rootless.IsRootless() && found {
- c.Command.RunE = func(cmd *cobra.Command, args []string) error {
- return fmt.Errorf("cannot run command %q in rootless mode", cmd.CommandPath())
- }
- }
+ if supported, found := c.Command.Annotations[registry.EngineMode]; found {
+ if !strings.Contains(cfg.EngineMode.String(), supported) {
+ continue
+ }
+ }
+
+ // Command cannot be run rootless
+ _, found := c.Command.Annotations[registry.UnshareNSRequired]
+ if found {
+ if rootless.IsRootless() && os.Getuid() != 0 {
+ c.Command.RunE = func(cmd *cobra.Command, args []string) error {
+ return fmt.Errorf("cannot run command %q in rootless mode, must execute `podman unshare` first", cmd.CommandPath())
}
- parent := rootCmd
- if c.Parent != nil {
- parent = c.Parent
+ }
+ } else {
+ _, found = c.Command.Annotations[registry.ParentNSRequired]
+ if rootless.IsRootless() && found {
+ c.Command.RunE = func(cmd *cobra.Command, args []string) error {
+ return fmt.Errorf("cannot run command %q in rootless mode", cmd.CommandPath())
}
- parent.AddCommand(c.Command)
-
- // - templates need to be set here, as PersistentPreRunE() is
- // not called when --help is used.
- // - rootCmd uses cobra default template not ours
- c.Command.SetHelpTemplate(helpTemplate)
- c.Command.SetUsageTemplate(usageTemplate)
- c.Command.DisableFlagsInUseLine = true
}
}
+
+ parent := rootCmd
+ if c.Parent != nil {
+ parent = c.Parent
+ }
+ parent.AddCommand(c.Command)
+
+ // - templates need to be set here, as PersistentPreRunE() is
+ // not called when --help is used.
+ // - rootCmd uses cobra default template not ours
+ c.Command.SetHelpTemplate(helpTemplate)
+ c.Command.SetUsageTemplate(usageTemplate)
+ c.Command.DisableFlagsInUseLine = true
}
if err := terminal.SetConsole(); err != nil {
logrus.Error(err)
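
The rewritten loop above replaces the per-command `Mode` slice with a cobra annotation lookup. As a minimal, self-contained sketch of that gating rule (illustrative only; the `supportedByEngine` helper and the standalone `main` below are hypothetical and not part of this patch), a command without the `registry.EngineMode` annotation is registered for every engine mode, while an annotated command is registered only when the current engine mode string contains the annotated value:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/podman/v3/cmd/podman/registry"
	"github.com/spf13/cobra"
)

// supportedByEngine mirrors the check added in parseCommands(): no annotation
// means the command is valid for every engine mode; otherwise the current
// mode string (cfg.EngineMode.String() in the hunk above) must contain the
// annotated value.
func supportedByEngine(cmd *cobra.Command, engineMode string) bool {
	supported, found := cmd.Annotations[registry.EngineMode]
	if !found {
		return true
	}
	return strings.Contains(engineMode, supported)
}

func main() {
	abiOnly := &cobra.Command{
		Use:         "example",
		Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
	}
	fmt.Println(supportedByEngine(abiOnly, registry.ABIMode))    // true
	fmt.Println(supportedByEngine(abiOnly, registry.TunnelMode)) // false
}
```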
diff --git a/cmd/podman/manifest/add.go b/cmd/podman/manifest/add.go
index 2499dc2e8..9d219601c 100644
--- a/cmd/podman/manifest/add.go
+++ b/cmd/podman/manifest/add.go
@@ -39,7 +39,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: addCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/annotate.go b/cmd/podman/manifest/annotate.go
index f5ec13bf2..d806ce9e6 100644
--- a/cmd/podman/manifest/annotate.go
+++ b/cmd/podman/manifest/annotate.go
@@ -15,6 +15,7 @@ import (
var (
manifestAnnotateOpts = entities.ManifestAnnotateOptions{}
annotateCmd = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "annotate [options] LIST IMAGE",
Short: "Add or update information about an entry in a manifest list or image index",
Long: "Adds or updates information about an entry in a manifest list or image index.",
@@ -27,7 +28,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: annotateCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/create.go b/cmd/podman/manifest/create.go
index a15b129d5..9f7d74d14 100644
--- a/cmd/podman/manifest/create.go
+++ b/cmd/podman/manifest/create.go
@@ -27,7 +27,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: createCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/exists.go b/cmd/podman/manifest/exists.go
index 7fed1f25c..55318330d 100644
--- a/cmd/podman/manifest/exists.go
+++ b/cmd/podman/manifest/exists.go
@@ -3,7 +3,6 @@ package manifest
import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -21,7 +20,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: existsCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/inspect.go b/cmd/podman/manifest/inspect.go
index 118c0ae70..d444f9066 100644
--- a/cmd/podman/manifest/inspect.go
+++ b/cmd/podman/manifest/inspect.go
@@ -6,26 +6,23 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
inspectCmd = &cobra.Command{
- Use: "inspect IMAGE",
- Short: "Display the contents of a manifest list or image index",
- Long: "Display the contents of a manifest list or image index.",
- RunE: inspect,
- ValidArgsFunction: common.AutocompleteImages,
- Example: "podman manifest inspect localhost/list",
- Args: cobra.ExactArgs(1),
- DisableFlagsInUseLine: true,
+ Use: "inspect IMAGE",
+ Short: "Display the contents of a manifest list or image index",
+ Long: "Display the contents of a manifest list or image index.",
+ RunE: inspect,
+ ValidArgsFunction: common.AutocompleteImages,
+ Example: "podman manifest inspect localhost/list",
+ Args: cobra.ExactArgs(1),
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/manifest.go b/cmd/podman/manifest/manifest.go
index c725078bf..98d5199f1 100644
--- a/cmd/podman/manifest/manifest.go
+++ b/cmd/podman/manifest/manifest.go
@@ -3,7 +3,6 @@ package manifest
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -26,7 +25,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: manifestCmd,
})
}
diff --git a/cmd/podman/manifest/push.go b/cmd/podman/manifest/push.go
index 1dae5cc95..8b13f6dde 100644
--- a/cmd/podman/manifest/push.go
+++ b/cmd/podman/manifest/push.go
@@ -39,7 +39,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pushCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/remove.go b/cmd/podman/manifest/remove.go
index 1d1128166..c44c0991e 100644
--- a/cmd/podman/manifest/remove.go
+++ b/cmd/podman/manifest/remove.go
@@ -6,27 +6,24 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
removeCmd = &cobra.Command{
- Use: "remove LIST IMAGE",
- Short: "Remove an entry from a manifest list or image index",
- Long: "Removes an image from a manifest list or image index.",
- RunE: remove,
- ValidArgsFunction: common.AutocompleteImages,
- Example: `podman manifest remove mylist:v1.11 sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736`,
- Args: cobra.ExactArgs(2),
- DisableFlagsInUseLine: true,
+ Use: "remove LIST IMAGE",
+ Short: "Remove an entry from a manifest list or image index",
+ Long: "Removes an image from a manifest list or image index.",
+ RunE: remove,
+ ValidArgsFunction: common.AutocompleteImages,
+ Example: `podman manifest remove mylist:v1.11 sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736`,
+ Args: cobra.ExactArgs(2),
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: removeCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/manifest/rm.go b/cmd/podman/manifest/rm.go
index 58c0de886..5e78197ed 100644
--- a/cmd/podman/manifest/rm.go
+++ b/cmd/podman/manifest/rm.go
@@ -6,27 +6,24 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/spf13/cobra"
)
var (
rmCmd = &cobra.Command{
- Use: "rm LIST",
- Short: "Remove manifest list or image index from local storage",
- Long: "Remove manifest list or image index from local storage.",
- RunE: rm,
- ValidArgsFunction: common.AutocompleteImages,
- Example: `podman manifest rm mylist:v1.11`,
- Args: cobra.ExactArgs(1),
- DisableFlagsInUseLine: true,
+ Use: "rm LIST",
+ Short: "Remove manifest list or image index from local storage",
+ Long: "Remove manifest list or image index from local storage.",
+ RunE: rm,
+ ValidArgsFunction: common.AutocompleteImages,
+ Example: `podman manifest rm mylist:v1.11`,
+ Args: cobra.ExactArgs(1),
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCmd,
Parent: manifestCmd,
})
diff --git a/cmd/podman/networks/connect.go b/cmd/podman/networks/connect.go
index 9fa088b78..0d62a45df 100644
--- a/cmd/podman/networks/connect.go
+++ b/cmd/podman/networks/connect.go
@@ -34,7 +34,6 @@ func networkConnectFlags(cmd *cobra.Command) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkConnectCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index 64ca2464a..1f3b321ba 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -75,7 +75,6 @@ func networkCreateFlags(cmd *cobra.Command) {
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkCreateCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/disconnect.go b/cmd/podman/networks/disconnect.go
index 26861eec6..7f2ff4252 100644
--- a/cmd/podman/networks/disconnect.go
+++ b/cmd/podman/networks/disconnect.go
@@ -31,7 +31,6 @@ func networkDisconnectFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkDisconnectCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/exists.go b/cmd/podman/networks/exists.go
index fdbd0ef11..89466ab7f 100644
--- a/cmd/podman/networks/exists.go
+++ b/cmd/podman/networks/exists.go
@@ -3,7 +3,6 @@ package network
import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkExistsCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/inspect.go b/cmd/podman/networks/inspect.go
index a05b9026d..c0e5b9720 100644
--- a/cmd/podman/networks/inspect.go
+++ b/cmd/podman/networks/inspect.go
@@ -24,7 +24,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkinspectCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index e1b182cbf..46872d078 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -54,7 +54,6 @@ func networkListFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networklistCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/network.go b/cmd/podman/networks/network.go
index 4d6cd8abd..ec045e3cf 100644
--- a/cmd/podman/networks/network.go
+++ b/cmd/podman/networks/network.go
@@ -3,7 +3,6 @@ package network
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkCmd,
})
}
diff --git a/cmd/podman/networks/prune.go b/cmd/podman/networks/prune.go
index 5f1cbda5f..e6b779ded 100644
--- a/cmd/podman/networks/prune.go
+++ b/cmd/podman/networks/prune.go
@@ -43,7 +43,6 @@ func networkPruneFlags(cmd *cobra.Command, flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkPruneCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/reload.go b/cmd/podman/networks/reload.go
index 035e56a07..371bea9ef 100644
--- a/cmd/podman/networks/reload.go
+++ b/cmd/podman/networks/reload.go
@@ -15,10 +15,11 @@ import (
var (
networkReloadDescription = `reload container networks, recreating firewall rules`
networkReloadCommand = &cobra.Command{
- Use: "reload [options] [CONTAINER...]",
- Short: "Reload firewall rules for one or more containers",
- Long: networkReloadDescription,
- RunE: networkReload,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "reload [options] [CONTAINER...]",
+ Short: "Reload firewall rules for one or more containers",
+ Long: networkReloadDescription,
+ RunE: networkReload,
Args: func(cmd *cobra.Command, args []string) error {
return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
},
@@ -39,7 +40,6 @@ func reloadFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: networkReloadCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/networks/rm.go b/cmd/podman/networks/rm.go
index 1ae79b27d..14f9869e4 100644
--- a/cmd/podman/networks/rm.go
+++ b/cmd/podman/networks/rm.go
@@ -38,7 +38,6 @@ func networkRmFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkrmCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index fe382bdfb..ece7d1f98 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -51,7 +51,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: kubeCmd,
Parent: playCmd,
})
diff --git a/cmd/podman/play/play.go b/cmd/podman/play/play.go
index 89c7e0139..f121d6a2d 100644
--- a/cmd/podman/play/play.go
+++ b/cmd/podman/play/play.go
@@ -3,7 +3,6 @@ package pods
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -19,7 +18,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: playCmd,
})
}
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 5600d8794..735dfa78c 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -47,7 +47,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: createCommand,
Parent: podCmd,
})
@@ -167,10 +166,11 @@ func create(cmd *cobra.Command, args []string) error {
defer errorhandling.SyncQuiet(podIDFD)
}
- createOptions.Net, err = common.NetFlagsToNetOptions(cmd)
+ createOptions.Net, err = common.NetFlagsToNetOptions(cmd, createOptions.Infra)
if err != nil {
return err
}
+
if len(createOptions.Net.PublishPorts) > 0 {
if !createOptions.Infra {
return errors.Errorf("you must have an infra container to publish port bindings to the host")
diff --git a/cmd/podman/pods/exists.go b/cmd/podman/pods/exists.go
index a9ef0370b..c4e64f88e 100644
--- a/cmd/podman/pods/exists.go
+++ b/cmd/podman/pods/exists.go
@@ -5,7 +5,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -21,13 +20,11 @@ var (
ValidArgsFunction: common.AutocompletePods,
Example: `podman pod exists podID
podman pod exists mypod || podman pod create --name mypod`,
- DisableFlagsInUseLine: true,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: existsCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/inspect.go b/cmd/podman/pods/inspect.go
index c66b81adb..d38f0062c 100644
--- a/cmd/podman/pods/inspect.go
+++ b/cmd/podman/pods/inspect.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/kill.go b/cmd/podman/pods/kill.go
index 17d5499c8..66693b419 100644
--- a/cmd/podman/pods/kill.go
+++ b/cmd/podman/pods/kill.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: killCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/pause.go b/cmd/podman/pods/pause.go
index 6624ccbd7..c1cc151e0 100644
--- a/cmd/podman/pods/pause.go
+++ b/cmd/podman/pods/pause.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pauseCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/pod.go b/cmd/podman/pods/pod.go
index 00fb02936..e6e23fd1f 100644
--- a/cmd/podman/pods/pod.go
+++ b/cmd/podman/pods/pod.go
@@ -3,7 +3,6 @@ package pods
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/util"
"github.com/spf13/cobra"
)
@@ -24,7 +23,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: podCmd,
})
}
diff --git a/cmd/podman/pods/prune.go b/cmd/podman/pods/prune.go
index 51b6bb4a9..8032c9f12 100644
--- a/cmd/podman/pods/prune.go
+++ b/cmd/podman/pods/prune.go
@@ -35,7 +35,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pruneCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/ps.go b/cmd/podman/pods/ps.go
index beaeda871..0271930e8 100644
--- a/cmd/podman/pods/ps.go
+++ b/cmd/podman/pods/ps.go
@@ -45,7 +45,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: psCmd,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/restart.go b/cmd/podman/pods/restart.go
index 5b9f47a5b..5b7ea121d 100644
--- a/cmd/podman/pods/restart.go
+++ b/cmd/podman/pods/restart.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: restartCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/rm.go b/cmd/podman/pods/rm.go
index 333e4eb9f..fbaf64c1f 100644
--- a/cmd/podman/pods/rm.go
+++ b/cmd/podman/pods/rm.go
@@ -45,7 +45,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/start.go b/cmd/podman/pods/start.go
index f6dca854e..e39891a9b 100644
--- a/cmd/podman/pods/start.go
+++ b/cmd/podman/pods/start.go
@@ -45,7 +45,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: startCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/stats.go b/cmd/podman/pods/stats.go
index 97147275e..057b3dead 100644
--- a/cmd/podman/pods/stats.go
+++ b/cmd/podman/pods/stats.go
@@ -48,7 +48,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: statsCmd,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/stop.go b/cmd/podman/pods/stop.go
index a9a16b39c..bcc054b8e 100644
--- a/cmd/podman/pods/stop.go
+++ b/cmd/podman/pods/stop.go
@@ -46,7 +46,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: stopCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/top.go b/cmd/podman/pods/top.go
index be8aab761..e26e281c8 100644
--- a/cmd/podman/pods/top.go
+++ b/cmd/podman/pods/top.go
@@ -39,7 +39,6 @@ podman pod top podID -eo user,pid,comm`,
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: topCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/pods/unpause.go b/cmd/podman/pods/unpause.go
index 859ac8ac2..ef1aea7c8 100644
--- a/cmd/podman/pods/unpause.go
+++ b/cmd/podman/pods/unpause.go
@@ -39,7 +39,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: unpauseCommand,
Parent: podCmd,
})
diff --git a/cmd/podman/registry/config.go b/cmd/podman/registry/config.go
index 3ce6e3899..25139a3de 100644
--- a/cmd/podman/registry/config.go
+++ b/cmd/podman/registry/config.go
@@ -15,14 +15,25 @@ import (
)
const (
- ParentNSRequired = "ParentNSRequired"
+ // ParentNSRequired used as cobra.Annotation when command requires root access
+ ParentNSRequired = "ParentNSRequired"
+
+ // UnshareNSRequired used as cobra.Annotation when command requires modified user namespace
UnshareNSRequired = "UnshareNSRequired"
+
+ // EngineMode used as cobra.Annotation when command supports a limited number of Engines
+ EngineMode = "EngineMode"
)
var (
podmanOptions entities.PodmanConfig
podmanSync sync.Once
abiSupport = false
+
+ // ABIMode used in cobra.Annotations registry.EngineMode when command only supports ABIMode
+ ABIMode = entities.ABIMode.String()
+ // TunnelMode used in cobra.Annotations registry.EngineMode when command only supports TunnelMode
+ TunnelMode = entities.TunnelMode.String()
)
// PodmanConfig returns an entities.PodmanConfig built up from
diff --git a/cmd/podman/registry/registry.go b/cmd/podman/registry/registry.go
index 0b1d9293a..607ef6d8e 100644
--- a/cmd/podman/registry/registry.go
+++ b/cmd/podman/registry/registry.go
@@ -19,7 +19,6 @@ const DefaultRootAPIPath = "/run/podman/podman.sock"
const DefaultRootAPIAddress = "unix:" + DefaultRootAPIPath
type CliCommand struct {
- Mode []entities.EngineMode
Command *cobra.Command
Parent *cobra.Command
}
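
With the `Mode` field gone from `CliCommand`, a command's engine restriction now lives on the command itself. Here is a minimal sketch of the new registration pattern, following the same shape as the `migrate`, `renumber` and `network reload` hunks in this patch (the `exampleCmd` command and its package are hypothetical):

```go
package example

import (
	"github.com/containers/podman/v3/cmd/podman/registry"
	"github.com/spf13/cobra"
)

// exampleCmd is a hypothetical ABI-only command. Before this patch it would
// have been registered with Mode: []entities.EngineMode{entities.ABIMode};
// the restriction is now a cobra annotation read by parseCommands().
var exampleCmd = &cobra.Command{
	Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
	Use:         "example",
	Short:       "Hypothetical ABI-only command",
	RunE: func(cmd *cobra.Command, args []string) error {
		return nil
	},
}

func init() {
	registry.Commands = append(registry.Commands, registry.CliCommand{
		Command: exampleCmd,
	})
}
```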
diff --git a/cmd/podman/secrets/create.go b/cmd/podman/secrets/create.go
index 4204f30b4..f5739e99a 100644
--- a/cmd/podman/secrets/create.go
+++ b/cmd/podman/secrets/create.go
@@ -35,7 +35,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: createCmd,
Parent: secretCmd,
})
diff --git a/cmd/podman/secrets/inspect.go b/cmd/podman/secrets/inspect.go
index bcb1adb5e..874456f29 100644
--- a/cmd/podman/secrets/inspect.go
+++ b/cmd/podman/secrets/inspect.go
@@ -33,7 +33,6 @@ var format string
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCmd,
Parent: secretCmd,
})
diff --git a/cmd/podman/secrets/list.go b/cmd/podman/secrets/list.go
index ba7065d61..448a69994 100644
--- a/cmd/podman/secrets/list.go
+++ b/cmd/podman/secrets/list.go
@@ -39,7 +39,6 @@ type listFlagType struct {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: lsCmd,
Parent: secretCmd,
})
diff --git a/cmd/podman/secrets/rm.go b/cmd/podman/secrets/rm.go
index ade015b10..dd3f5d9b9 100644
--- a/cmd/podman/secrets/rm.go
+++ b/cmd/podman/secrets/rm.go
@@ -24,7 +24,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCmd,
Parent: secretCmd,
})
diff --git a/cmd/podman/secrets/secret.go b/cmd/podman/secrets/secret.go
index 139997b87..610b331d1 100644
--- a/cmd/podman/secrets/secret.go
+++ b/cmd/podman/secrets/secret.go
@@ -3,7 +3,6 @@ package secrets
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -19,7 +18,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: secretCmd,
})
}
diff --git a/cmd/podman/system/connection.go b/cmd/podman/system/connection.go
index dbdda8cf9..1f8e8f13f 100644
--- a/cmd/podman/system/connection.go
+++ b/cmd/podman/system/connection.go
@@ -3,7 +3,6 @@ package system
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -14,20 +13,18 @@ var (
}
ConnectionCmd = &cobra.Command{
- Use: "connection",
- Short: "Manage remote ssh destinations",
- Long: `Manage ssh destination information in podman configuration`,
- DisableFlagsInUseLine: true,
- PersistentPreRunE: noOp,
- RunE: validate.SubCommandExists,
- PersistentPostRunE: noOp,
- TraverseChildren: false,
+ Use: "connection",
+ Short: "Manage remote ssh destinations",
+ Long: `Manage ssh destination information in podman configuration`,
+ PersistentPreRunE: noOp,
+ RunE: validate.SubCommandExists,
+ PersistentPostRunE: noOp,
+ TraverseChildren: false,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: ConnectionCmd,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/connection/add.go b/cmd/podman/system/connection/add.go
index 89d28849c..ecfeb6608 100644
--- a/cmd/podman/system/connection/add.go
+++ b/cmd/podman/system/connection/add.go
@@ -15,7 +15,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/system"
"github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/terminal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -53,7 +52,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: addCmd,
Parent: system.ConnectionCmd,
})
diff --git a/cmd/podman/system/connection/default.go b/cmd/podman/system/connection/default.go
index 073bdbc3f..cfedc337b 100644
--- a/cmd/podman/system/connection/default.go
+++ b/cmd/podman/system/connection/default.go
@@ -7,27 +7,24 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/system"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
// Skip creating engines since this command will obtain connection information to said engines
dfltCmd = &cobra.Command{
- Use: "default NAME",
- Args: cobra.ExactArgs(1),
- Short: "Set named destination as default",
- Long: `Set named destination as default for the Podman service`,
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteSystemConnections,
- RunE: defaultRunE,
- Example: `podman system connection default testing`,
+ Use: "default NAME",
+ Args: cobra.ExactArgs(1),
+ Short: "Set named destination as default",
+ Long: `Set named destination as default for the Podman service`,
+ ValidArgsFunction: common.AutocompleteSystemConnections,
+ RunE: defaultRunE,
+ Example: `podman system connection default testing`,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: dfltCmd,
Parent: system.ConnectionCmd,
})
diff --git a/cmd/podman/system/connection/list.go b/cmd/podman/system/connection/list.go
index fe7026ae3..ae88b0b30 100644
--- a/cmd/podman/system/connection/list.go
+++ b/cmd/podman/system/connection/list.go
@@ -10,18 +10,16 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/system"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
listCmd = &cobra.Command{
- Use: "list",
- Aliases: []string{"ls"},
- Args: validate.NoArgs,
- Short: "List destination for the Podman service(s)",
- Long: `List destination information for the Podman service(s) in podman configuration`,
- DisableFlagsInUseLine: true,
+ Use: "list",
+ Aliases: []string{"ls"},
+ Args: validate.NoArgs,
+ Short: "List destination for the Podman service(s)",
+ Long: `List destination information for the Podman service(s) in podman configuration`,
Example: `podman system connection list
podman system connection ls`,
ValidArgsFunction: completion.AutocompleteNone,
@@ -32,7 +30,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: listCmd,
Parent: system.ConnectionCmd,
})
diff --git a/cmd/podman/system/connection/remove.go b/cmd/podman/system/connection/remove.go
index 4acaa36a9..73bae4994 100644
--- a/cmd/podman/system/connection/remove.go
+++ b/cmd/podman/system/connection/remove.go
@@ -5,21 +5,19 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/system"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
// Skip creating engines since this command will obtain connection information to said engines
rmCmd = &cobra.Command{
- Use: "remove NAME",
- Args: cobra.ExactArgs(1),
- Aliases: []string{"rm"},
- Long: `Delete named destination from podman configuration`,
- Short: "Delete named destination",
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteSystemConnections,
- RunE: rm,
+ Use: "remove NAME",
+ Args: cobra.ExactArgs(1),
+ Aliases: []string{"rm"},
+ Long: `Delete named destination from podman configuration`,
+ Short: "Delete named destination",
+ ValidArgsFunction: common.AutocompleteSystemConnections,
+ RunE: rm,
Example: `podman system connection remove devl
podman system connection rm devl`,
}
@@ -27,7 +25,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCmd,
Parent: system.ConnectionCmd,
})
diff --git a/cmd/podman/system/connection/rename.go b/cmd/podman/system/connection/rename.go
index 7713c2b09..898457d38 100644
--- a/cmd/podman/system/connection/rename.go
+++ b/cmd/podman/system/connection/rename.go
@@ -7,21 +7,19 @@ import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/system"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
var (
// Skip creating engines since this command will obtain connection information to said engines
renameCmd = &cobra.Command{
- Use: "rename OLD NEW",
- Aliases: []string{"mv"},
- Args: cobra.ExactArgs(2),
- Short: "Rename \"old\" to \"new\"",
- Long: `Rename destination for the Podman service from "old" to "new"`,
- DisableFlagsInUseLine: true,
- ValidArgsFunction: common.AutocompleteSystemConnections,
- RunE: rename,
+ Use: "rename OLD NEW",
+ Aliases: []string{"mv"},
+ Args: cobra.ExactArgs(2),
+ Short: "Rename \"old\" to \"new\"",
+ Long: `Rename destination for the Podman service from "old" to "new"`,
+ ValidArgsFunction: common.AutocompleteSystemConnections,
+ RunE: rename,
Example: `podman system connection rename laptop devl,
podman system connection mv laptop devl`,
}
@@ -29,7 +27,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: renameCmd,
Parent: system.ConnectionCmd,
})
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 5e179a82d..de56c57d0 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -40,7 +40,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: dfSystemCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/events.go b/cmd/podman/system/events.go
index 568610bdc..4aa413ec6 100644
--- a/cmd/podman/system/events.go
+++ b/cmd/podman/system/events.go
@@ -41,7 +41,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: eventsCommand,
})
flags := eventsCommand.Flags()
diff --git a/cmd/podman/system/info.go b/cmd/podman/system/info.go
index 44be4ccec..1dc90f79a 100644
--- a/cmd/podman/system/info.go
+++ b/cmd/podman/system/info.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
"github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
)
@@ -49,13 +48,11 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: infoCommand,
})
infoFlags(infoCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: systemInfoCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/migrate.go b/cmd/podman/system/migrate.go
index 892d60a38..9940cd063 100644
--- a/cmd/podman/system/migrate.go
+++ b/cmd/podman/system/migrate.go
@@ -22,6 +22,7 @@ var (
`
migrateCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "migrate [options]",
Args: validate.NoArgs,
Short: "Migrate containers",
@@ -37,7 +38,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: migrateCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/prune.go b/cmd/podman/system/prune.go
index 0f1285564..e09e2d5e5 100644
--- a/cmd/podman/system/prune.go
+++ b/cmd/podman/system/prune.go
@@ -41,7 +41,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pruneCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/renumber.go b/cmd/podman/system/renumber.go
index 91c49333a..83a873c2a 100644
--- a/cmd/podman/system/renumber.go
+++ b/cmd/podman/system/renumber.go
@@ -23,19 +23,18 @@ var (
`
renumberCommand = &cobra.Command{
- Use: "renumber",
- Args: validate.NoArgs,
- DisableFlagsInUseLine: true,
- Short: "Migrate lock numbers",
- Long: renumberDescription,
- Run: renumber,
- ValidArgsFunction: completion.AutocompleteNone,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "renumber",
+ Args: validate.NoArgs,
+ Short: "Migrate lock numbers",
+ Long: renumberDescription,
+ Run: renumber,
+ ValidArgsFunction: completion.AutocompleteNone,
}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: renumberCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/reset.go b/cmd/podman/system/reset.go
index 6f2f873dd..0edb36889 100644
--- a/cmd/podman/system/reset.go
+++ b/cmd/podman/system/reset.go
@@ -23,6 +23,7 @@ var (
All containers will be stopped and removed, and all images, volumes and container content will be removed.
`
systemResetCommand = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "reset [options]",
Args: validate.NoArgs,
Short: "Reset podman storage",
@@ -36,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: systemResetCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index 1e1cdbd12..a30f43839 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -28,6 +28,7 @@ Enable a listening service for API access to Podman commands.
`
srvCmd = &cobra.Command{
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "service [options] [URI]",
Args: cobra.MaximumNArgs(1),
Short: "Run API service",
@@ -38,13 +39,13 @@ Enable a listening service for API access to Podman commands.
}
srvArgs = struct {
- Timeout int64
+ Timeout int64
+ CorsHeaders string
}{}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: srvCmd,
Parent: systemCmd,
})
@@ -54,6 +55,8 @@ func init() {
timeFlagName := "time"
flags.Int64VarP(&srvArgs.Timeout, timeFlagName, "t", 5, "Time until the service session expires in seconds. Use 0 to disable the timeout")
_ = srvCmd.RegisterFlagCompletionFunc(timeFlagName, completion.AutocompleteNone)
+ flags.StringVarP(&srvArgs.CorsHeaders, "cors", "", "", "Set CORS Headers")
+ _ = srvCmd.RegisterFlagCompletionFunc("cors", completion.AutocompleteNone)
flags.SetNormalizeFunc(aliasTimeoutFlag)
}
@@ -71,7 +74,6 @@ func service(cmd *cobra.Command, args []string) error {
return err
}
logrus.Infof("using API endpoint: '%s'", apiURI)
-
// Clean up any old existing unix domain socket
if len(apiURI) > 0 {
uri, err := url.Parse(apiURI)
@@ -90,8 +92,9 @@ func service(cmd *cobra.Command, args []string) error {
}
opts := entities.ServiceOptions{
- URI: apiURI,
- Command: cmd,
+ URI: apiURI,
+ Command: cmd,
+ CorsHeaders: srvArgs.CorsHeaders,
}
opts.Timeout = time.Duration(srvArgs.Timeout) * time.Second
diff --git a/cmd/podman/system/service_abi.go b/cmd/podman/system/service_abi.go
index 364663323..d59a45564 100644
--- a/cmd/podman/system/service_abi.go
+++ b/cmd/podman/system/service_abi.go
@@ -72,7 +72,7 @@ func restService(opts entities.ServiceOptions, flags *pflag.FlagSet, cfg *entiti
}
infra.StartWatcher(rt)
- server, err := api.NewServerWithSettings(rt, opts.Timeout, listener)
+ server, err := api.NewServerWithSettings(rt, listener, api.Options{Timeout: opts.Timeout, CorsHeaders: opts.CorsHeaders})
if err != nil {
return err
}
diff --git a/cmd/podman/system/system.go b/cmd/podman/system/system.go
index ab06c3ae0..1947decaf 100644
--- a/cmd/podman/system/system.go
+++ b/cmd/podman/system/system.go
@@ -3,7 +3,6 @@ package system
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: systemCmd,
})
}
diff --git a/cmd/podman/system/unshare.go b/cmd/podman/system/unshare.go
index c07751532..f930d8b62 100644
--- a/cmd/podman/system/unshare.go
+++ b/cmd/podman/system/unshare.go
@@ -15,12 +15,12 @@ var (
unshareOptions = entities.SystemUnshareOptions{}
unshareDescription = "Runs a command in a modified user namespace."
unshareCommand = &cobra.Command{
- Use: "unshare [options] [COMMAND [ARG...]]",
- DisableFlagsInUseLine: true,
- Short: "Run a command in a modified user namespace",
- Long: unshareDescription,
- RunE: unshare,
- ValidArgsFunction: completion.AutocompleteDefault,
+ Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Use: "unshare [options] [COMMAND [ARG...]]",
+ Short: "Run a command in a modified user namespace",
+ Long: unshareDescription,
+ RunE: unshare,
+ ValidArgsFunction: completion.AutocompleteDefault,
Example: `podman unshare id
podman unshare cat /proc/self/uid_map,
podman unshare podman-script.sh`,
@@ -29,7 +29,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
Command: unshareCommand,
})
flags := unshareCommand.Flags()
diff --git a/cmd/podman/system/version.go b/cmd/podman/system/version.go
index ad9fd2a85..5575fc87d 100644
--- a/cmd/podman/system/version.go
+++ b/cmd/podman/system/version.go
@@ -31,7 +31,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: versionCommand,
})
flags := versionCommand.Flags()
diff --git a/cmd/podman/volumes/create.go b/cmd/podman/volumes/create.go
index 1820260a4..70373d69d 100644
--- a/cmd/podman/volumes/create.go
+++ b/cmd/podman/volumes/create.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: createCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/exists.go b/cmd/podman/volumes/exists.go
index 4dfbb3cd4..85bbcb29a 100644
--- a/cmd/podman/volumes/exists.go
+++ b/cmd/podman/volumes/exists.go
@@ -3,7 +3,6 @@ package volumes
import (
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: volumeExistsCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/inspect.go b/cmd/podman/volumes/inspect.go
index d52eee9f3..7510f8a1c 100644
--- a/cmd/podman/volumes/inspect.go
+++ b/cmd/podman/volumes/inspect.go
@@ -32,7 +32,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: inspectCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/list.go b/cmd/podman/volumes/list.go
index f402afa94..990b39435 100644
--- a/cmd/podman/volumes/list.go
+++ b/cmd/podman/volumes/list.go
@@ -49,7 +49,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: lsCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/prune.go b/cmd/podman/volumes/prune.go
index 8e190b870..1f3cc6913 100644
--- a/cmd/podman/volumes/prune.go
+++ b/cmd/podman/volumes/prune.go
@@ -35,7 +35,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: pruneCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/rm.go b/cmd/podman/volumes/rm.go
index e149e398c..9ba4a30a1 100644
--- a/cmd/podman/volumes/rm.go
+++ b/cmd/podman/volumes/rm.go
@@ -37,7 +37,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCommand,
Parent: volumeCmd,
})
diff --git a/cmd/podman/volumes/volume.go b/cmd/podman/volumes/volume.go
index 7786d5dc3..f42a6d81a 100644
--- a/cmd/podman/volumes/volume.go
+++ b/cmd/podman/volumes/volume.go
@@ -3,7 +3,6 @@ package volumes
import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
- "github.com/containers/podman/v3/pkg/domain/entities"
"github.com/spf13/cobra"
)
@@ -22,7 +21,6 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: volumeCmd,
})
}
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 02b73bdb8..6146a2c0e 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -36,7 +36,7 @@ Epoch: 99
%else
Epoch: 0
%endif
-Version: 3.2.0
+Version: 3.3.0
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
diff --git a/docs/MANPAGE_SYNTAX.md b/docs/MANPAGE_SYNTAX.md
new file mode 100644
index 000000000..c9b677688
--- /dev/null
+++ b/docs/MANPAGE_SYNTAX.md
@@ -0,0 +1,119 @@
+% podman-command(1)
+
+## NAME
+podman\-command - short description
+
+## SYNOPSIS
+(Shows the command structure.)
+
+**podman command** [*optional*] *mandatory value*
+
+**podman subcommand command** [*optional*] *mandatory value*
+
+(If there is the possibility to choose between two or more mandatory command values, they are separated by a vertical bar. There should always be a space before and after the vertical bar to ensure better readability.)
+
+**podman command** [*optional*] *value1* | *value2*
+
+**podman subcommand command** [*optional*] *value1* | *value2*
+
+(If an optional value follows a mandatory one.)
+
+**podman command** [*optional*] *value1* | *value2* [*optional*]
+
+**podman subcommand command** [*optional*] *value1* | *value2* [*optional*]
+
+(If the command accepts an arbitrary number of values.)
+
+**podman command** [*optional*] *value* [*value* ...]
+
+**podman subcommand command** [*optional*] *value* [*value* ...]
+
+## DESCRIPTION
+**podman command** is always the beginning of the DESCRIPTION section. Putting the command as the first part of the DESCRIPTION ensures uniformity. All commands mentioned in the text retain their appearance and form.\
+Example sentence: The command **podman command** is an example command.
+
+Commands or files that are quoted from other podman manpages or podman repositories have to be linked to those. Non-podman commands are not to be linked.\
+Example sentence: You can use **[podman-run](podman-run.1.md)** or **[containers.conf(5)](https://github.com/containers/common/blob/master/docs/containers.conf.5.md)** for the problem.
+
+It should also be specified if the command can only be run as root. In addition, it should be described when a command, OPTION, or other content cannot be executed with the remote client or in combination with other commands, OPTIONS, or content. In this case, the following sentence is put at the end of a command, OPTION, or content: *IMPORTANT: This option/command/other is not available with the command/OPTION/content/remote Podman client*. For a command, this should be done in the DESCRIPTION section. For the OPTIONS, it should be done in the DESCRIPTION of the specified OPTION. Do not use pronouns in the man pages, especially the word `you`.
+
+## OPTIONS
+All flags are referred to as OPTIONS. The term flags should not be used. All OPTIONS are listed in this section. OPTIONS that appear in descriptions of other OPTIONS and sections retain their appearance, for example: **--exit**.
+
+OPTIONS that are quoted from other podman manpages or podman repositories have to be linked to those.\
+Example sentence: You can use **[podman-generate-systemd --new](podman-generate-systemd.1.md#--new)** for the problem.
+
+Each OPTION should be explained to the fullest extent below the OPTION itself. Each OPTION is introduced by an H4 header (`####`). If the OPTION has a default argument, it has to be explained in the description of the OPTION. If the OPTION is also not available with the remote client, the sentence about the default argument should be the second-to-last sentence.
+
+
+#### **--version**, **-v**
+
+OPTIONS can be put after the command in two different ways: either as the long version **--option** or as the short version **-o**. If there are two ways to write an OPTION, they are separated by a comma. If there are two versions of one OPTION, the long version is always shown in front.\
+Example: The default is **false**. *IMPORTANT: This option is not available with the remote Podman client*.
+
+#### **--exit**
+
+An example of an OPTION that has only one possible form; it cannot be shortened to **-e**.
+
+#### **--answer**=, **-a**=**active** | *disable*
+
+The "answer" OPTION above is an example of an OPTION that accepts two possible arguments as inputs. If there is a default argument that is selected when the OPTION is not used in the command, it is shown in **bold**. If the OPTION is used, it must be followed by an argument. It must always be ensured that the default argument is in the first position after the OPTION. In this example, there are two different ways to write the OPTION. Both forms have to be shown with the arguments following them. The default value is shown as **active**.
+
+#### **--status**=**good** | *better* | *best*
+
+This is an example of three arguments following an OPTION. If the number of arguments is greater than three, the arguments are **not** listed after the equal sign. Regardless of the number of arguments, they have to be shown in a table as in **--test**=**_test_**. The default value is shown as **good**.
+
+#### **--test**=**test**
+
+OPTIONS that are followed by an equal sign include an argument after the equal sign in **bold**. If there is a default argument that is used when the OPTION is not specified in the command, the argument after the equal sign is displayed in **bold**. All arguments must be listed and explained in the text below the OPTION.
+
+| Argument | Description |
+| ------------------ | --------------------------------------------------------------------------- |
+| **example one** | This argument is the default argument if the OPTION is not specified. |
+| *example two* | If one refers to a command, one should use **bold** marks. |
+| *example three* | Example: In combination with **podman command** highly effective. |
+| *example four* | Example: Can be combined with **--exit**. |
+| *example five* | The fifth description |
+
+The table shows an example of a listing of arguments. The contents of the table should be aligned left. If the content of the table conflicts with this, it can be aligned in a way that supports the understanding of the content. If there is a default argument, it **must** be listed as the first entry in the table. The default value is shown as **example one**.
+
+
+If the number of arguments is smaller than four, they have to be listed after the OPTION, as seen in the OPTION **--status**.
+
+#### **--problem**=*problem*
+
+OPTIONS that are followed by an equal sign and an unspecified argument have no default argument. If such an OPTION is written with an equal sign and the argument is left empty, there will be no error, but the OPTION will be ignored. The meaning of the argument is preferably described in `one` word after the equal sign, in *italic* format.
+
+## SUBCHAPTER
+For chapters that are made specifically as an individual SUBCHAPTER in a man page, the previous conditions regarding formatting apply.
+
+There are no restrictions for the use of paragraphs and tables. Within these paragraphs and tables the previous conditions regarding formatting apply.
+
+Strings of characters or numbers can be highlighted with `backticks`. Paths of any kind **must** be highlighted.
+
+IMPORTANT: Only characters that are **not** part of the categories mentioned before can be highlighted. This includes headers. For example, it is not advised to highlight an OPTION or a **command**.
+
+SUBHEADINGS are displayed as follows:
+### SUBHEADING
+Text for SUBHEADINGS.
+
+## EXAMPLES
+All EXAMPLES are listed in this section. This section should be at the end of each man page. Each EXAMPLE is always in one box. The box starts with the first written line and ends with the last written line, **not** with a blank line. The `$` in front of a command indicates that it can be run as a normal user, while commands starting with `#` can only be run as root. If there is the need for a comment in a box, the comment should have `###` in front of it.
+
+Description of the EXAMPLE
+```
+$ podman command
+
+$ podman command -o
+
+$ cat $HOME/Dockerfile | podman command --option
+```
+
+Description of the second EXAMPLE
+```
+$ podman command --redhat
+
+$ podman command --redhat better
+
+$ podman command --redhat=better
+```
diff --git a/docs/README.md b/docs/README.md
index a00b8f39c..0f2af16d6 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -28,6 +28,10 @@ link on that page.
| docs/links-to-html.lua | pandoc filter to do aliases for html files |
| docs/use-pagetitle.lua | pandoc filter to set html document title |
+## Manpage Syntax
+
+The syntax for the formatting of all man pages can be found [here](MANPAGE_SYNTAX.md).
+
## API Reference
The [latest online documentation](http://docs.podman.io/en/latest/_static/api.html) is
diff --git a/docs/source/Commands.rst b/docs/source/Commands.rst
index 766b6a02e..767b09c08 100644
--- a/docs/source/Commands.rst
+++ b/docs/source/Commands.rst
@@ -55,7 +55,7 @@ Commands
:doc:`logs <markdown/podman-logs.1>` Fetch the logs of a container
-:doc:`machine <markdown/podman-machine.1>` Manage podman's virtual machine
+:doc:`machine <machine>` Manage podman's virtual machine
:doc:`manifest <manifest>` Create and manipulate manifest lists and image indexes
@@ -91,7 +91,7 @@ Commands
:doc:`search <markdown/podman-search.1>` Search registry for image
-:doc:`secret <markdown/podman-secret.1>` Manage podman secrets
+:doc:`secret <secret>` Manage podman secrets
:doc:`start <markdown/podman-start.1>` Start one or more containers
diff --git a/docs/source/machine.rst b/docs/source/machine.rst
index 16c042173..3962fca99 100644
--- a/docs/source/machine.rst
+++ b/docs/source/machine.rst
@@ -3,8 +3,13 @@ Machine
:doc:`init <markdown/podman-machine-init.1>` Initialize a new virtual machine
+
:doc:`list <markdown/podman-machine-list.1>` List virtual machines
+
:doc:`rm <markdown/podman-machine-rm.1>` Remove a virtual machine
+
:doc:`ssh <markdown/podman-machine-ssh.1>` SSH into a virtual machine
+
:doc:`start <markdown/podman-machine-start.1>` Start a virtual machine
+
:doc:`stop <markdown/podman-machine-stop.1>` Stop a virtual machine
diff --git a/docs/source/markdown/containers-mounts.conf.5.md b/docs/source/markdown/containers-mounts.conf.5.md
deleted file mode 100644
index 74492c831..000000000
--- a/docs/source/markdown/containers-mounts.conf.5.md
+++ /dev/null
@@ -1,16 +0,0 @@
-% containers-mounts.conf(5)
-
-## NAME
-containers-mounts.conf - configuration file for default mounts in containers
-
-## DESCRIPTION
-The mounts.conf file specifies volume mount directories that are automatically mounted inside containers. Container processes can then use this content. Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories. Note that for security reasons, tools adhering to the mounts.conf are expected to copy the contents instead of bind mounting the paths from the host.
-
-## FORMAT
-The format of the mounts.conf is the volume format `/SRC:/DEST`, one mount per line. For example, a mounts.conf with the line `/usr/share/secrets:/run/secrets` would cause the contents of the `/usr/share/secrets` directory on the host to be mounted on the `/run/secrets` directory inside the container. Setting mountpoints allows containers to use the files of the host, for instance, to use the host's subscription to some enterprise Linux distribution.
-
-## FILES
-Some distributions may provide a `/usr/share/containers/mounts.conf` file to provide default mounts, but users can create a `/etc/containers/mounts.conf`, to specify their own special volumes to mount in the container. When Podman runs in rootless mode, the file `$HOME/.config/containers/mounts.conf` will override the default if it exists.
-
-## HISTORY
-Aug 2018, Originally compiled by Valentin Rothberg <vrothberg@suse.com>
diff --git a/docs/source/markdown/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md
index c4a5eec50..092772916 100644
--- a/docs/source/markdown/podman-attach.1.md
+++ b/docs/source/markdown/podman-attach.1.md
@@ -9,48 +9,48 @@ podman\-attach - Attach to a running container
**podman container attach** [*options*] *container*
## DESCRIPTION
-The attach command allows you to attach to a running container using the container's ID
-or name, either to view its ongoing output or to control it interactively.
-
-You can detach from the container (and leave it running) using a configurable key sequence. The default
-sequence is `ctrl-p,ctrl-q`.
-Configure the keys sequence using the **--detach-keys** option, or specifying
-it in the **containers.conf** file: see **containers.conf(5)** for more information.
+**podman attach** attaches to a running *container* using the *container's name* or *ID*, to either view its ongoing output or to control it interactively.\
+The *container* can be detached from (and left running) using a configurable key sequence. The default sequence is `ctrl-p,ctrl-q`. Configure the key sequence using the **--detach-keys** option, or by specifying it in the `containers.conf` file: see **[containers.conf(5)](https://github.com/containers/common/blob/master/docs/containers.conf.5.md)** for more information.
## OPTIONS
-#### **--detach-keys**=*sequence*
+#### **--detach-keys**=**sequence**
-Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
+Specify the key **sequence** for detaching a *container*. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is `ctrl-p,ctrl-q`.
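A brief, hedged illustration of the sequence format described above; `foobar` is the container name used in the EXAMPLES section below, and the second command disables detaching entirely:

```
$ podman attach --detach-keys="ctrl-a,ctrl-b" foobar

$ podman attach --detach-keys="" foobar
```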
#### **--latest**, **-l**
-Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
-to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
+Instead of providing the *container name* or *ID*, use the last created *container*. If you use methods other than Podman to run containers, such as CRI-O, the last started *container* could be from either of those methods. The default is **false**.\
+*IMPORTANT: This option is not available with the remote Podman client*
#### **--no-stdin**
-Do not attach STDIN. The default is false.
+Do not attach STDIN. The default is **false**.
-#### **--sig-proxy**=*true*|*false*
+#### **--sig-proxy**
-Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*.
+Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is **true**.
## EXAMPLES
+Attach to a container called "foobar".
```
$ podman attach foobar
-[root@localhost /]#
```
+
+Attach to the latest created container.
```
$ podman attach --latest
-[root@localhost /]#
```
+
+Attach to a container whose ID starts with "1234".
```
$ podman attach 1234
-[root@localhost /]#
```
+
+Attach to a container without attaching STDIN.
```
$ podman attach --no-stdin foobar
```
+
## SEE ALSO
-podman(1), podman-exec(1), podman-run(1), containers.conf(5)
+**[podman(1)](podman.1.md)**, **[podman-exec(1)](podman-exec.1.md)**, **[podman-run(1)](podman-run.1.md)**, **[containers.conf(5)](https://github.com/containers/common/blob/master/docs/containers.conf.5.md)**
diff --git a/docs/source/markdown/podman-auto-update.1.md b/docs/source/markdown/podman-auto-update.1.md
index 1bb5d4bbe..52a9a3fec 100644
--- a/docs/source/markdown/podman-auto-update.1.md
+++ b/docs/source/markdown/podman-auto-update.1.md
@@ -1,16 +1,16 @@
% podman-auto-update(1)
## NAME
-podman-auto-update - Auto update containers according to their auto-update policy
+podman\-auto-update - Auto update containers according to their auto-update policy
## SYNOPSIS
**podman auto-update** [*options*]
## DESCRIPTION
-`podman auto-update` looks up containers with a specified "io.containers.autoupdate" label (i.e., the auto-update policy).
+**podman auto-update** looks up containers with a specified `io.containers.autoupdate` label (i.e., the auto-update policy).
-If the label is present and set to "registry", Podman reaches out to the corresponding registry to check if the image has been updated.
-The label "image" is an alternative to "registry" maintained for backwards compatibility.
+If the label is present and set to `registry`, Podman reaches out to the corresponding registry to check if the image has been updated.
+The label `image` is an alternative to `registry` maintained for backwards compatibility.
An image is considered updated if the digest in the local storage is different than the one of the remote image.
If an image must be updated, Podman pulls it down and restarts the systemd unit executing the container.
@@ -18,60 +18,57 @@ The registry policy requires a fully-qualified image reference (e.g., quay.io/po
This enforcement is necessary to know which image to actually check and pull.
If an image ID was used, Podman would not know which image to check/pull anymore.
-Alternatively, if the autoupdate label is set to "local", Podman will compare the image a container is using to the image with it's raw name in local storage.
+Alternatively, if the autoupdate label is set to `local`, Podman will compare the image a container is using to the image with its raw name in local storage.
If an image is updated locally, Podman simply restarts the systemd unit executing the container.
-If "io.containers.autoupdate.authfile" label is present, Podman reaches out to corresponding authfile when pulling images.
+If the `io.containers.autoupdate.authfile` label is present, Podman reaches out to the corresponding authfile when pulling images.
-At container-creation time, Podman looks up the "PODMAN_SYSTEMD_UNIT" environment variables and stores it verbatim in the container's label.
-This variable is now set by all systemd units generated by `podman-generate-systemd` and is set to `%n` (i.e., the name of systemd unit starting the container).
+At container-creation time, Podman looks up the `PODMAN_SYSTEMD_UNIT` environment variable and stores it verbatim in the container's label.
+This variable is now set by all systemd units generated by **[podman-generate-systemd](podman-generate-systemd.1.md)** and is set to `%n` (i.e., the name of systemd unit starting the container).
This data is then being used in the auto-update sequence to instruct systemd (via DBUS) to restart the unit and hence to restart the container.
-Note that`podman auto-update` relies on systemd. The systemd units are expected to be generated with `podman-generate-systemd --new`, or similar units that create new containers in order to run the updated images.
+Note that **podman auto-update** relies on systemd. The systemd units are expected to be generated with **[podman-generate-systemd --new](podman-generate-systemd.1.md#--new)**, or similar units that create new containers in order to run the updated images.
Systemd units that start and stop a container cannot run a new image.
-
### Systemd Unit and Timer
-Podman ships with a `podman-auto-update.service` systemd unit. This unit is triggered daily at midnight by the `podman-auto-update.timer` systemd timer. The timer can be altered for custom time-based updates if desired. The unit can further be invoked by other systemd units (e.g., via the dependency tree) or manually via `systemctl start podman-auto-update.service`.
-
+Podman ships with a `podman-auto-update.service` systemd unit. This unit is triggered daily at midnight by the `podman-auto-update.timer` systemd timer. The timer can be altered for custom time-based updates if desired. The unit can further be invoked by other systemd units (e.g., via the dependency tree) or manually via **systemctl start podman-auto-update.service**.
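As a sketch only, assuming the shipped unit files are visible to systemd's user instance (a system-wide installation would drop `--user`), the timer could be enabled and an update triggered manually like this:

```
### Enable the daily timer
$ systemctl --user enable --now podman-auto-update.timer

### Trigger an update immediately
$ systemctl --user start podman-auto-update.service
```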
## OPTIONS
#### **--authfile**=*path*
-Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
-If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+Path of the authentication file. Default is `${XDG_RUNTIME_DIR}/containers/auth.json`, which is set using **[podman login](podman-login.1.md)**.
+If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using **docker login**.
-Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
-environment variable. `export REGISTRY_AUTH_FILE=path`
+Note: There is also the option to override the default path of the authentication file by setting the `REGISTRY_AUTH_FILE` environment variable. This can be done with **export REGISTRY_AUTH_FILE=_path_**.
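A minimal sketch of the override described above; the path `~/auth.json` is only a placeholder:

```
$ export REGISTRY_AUTH_FILE=~/auth.json
$ podman auto-update
```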
## EXAMPLES
Autoupdate with registry policy
```
-# Start a container
+### Start a container
$ podman run --label "io.containers.autoupdate=registry" \
--label "io.containers.autoupdate.authfile=/some/authfile.json" \
-d busybox:latest top
bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
-# Generate a systemd unit for this container
+### Generate a systemd unit for this container
$ podman generate systemd --new --files bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
/home/user/containers/libpod/container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
-# Load the new systemd unit and start it
+### Load the new systemd unit and start it
$ mv ./container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service ~/.config/systemd/user
$ systemctl --user daemon-reload
-# If the previously created containers or pods are using shared resources, such as ports, make sure to remove them before starting the generated systemd units.
+### If the previously created containers or pods are using shared resources, such as ports, make sure to remove them before starting the generated systemd units.
$ podman stop bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
$ podman rm bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
$ systemctl --user start container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
-# Auto-update the container
+### Auto-update the container
$ podman auto-update
container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
```
@@ -79,37 +76,37 @@ container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.servi
Autoupdate with local policy
```
-# Start a container
+### Start a container
$ podman run --label "io.containers.autoupdate=local" \
-d busybox:latest top
be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
-# Generate a systemd unit for this container
+### Generate a systemd unit for this container
$ podman generate systemd --new --files be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
/home/user/containers/libpod/container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
-# Load the new systemd unit and start it
+### Load the new systemd unit and start it
$ mv ./container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service ~/.config/systemd/user
$ systemctl --user daemon-reload
-# If the previously created containers or pods are using shared resources, such as ports, make sure to remove them before starting the generated systemd units.
+### If the previously created containers or pods are using shared resources, such as ports, make sure to remove them before starting the generated systemd units.
$ podman stop be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
$ podman rm be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
$ systemctl --user start container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
-# Get the name of the container
+### Get the name of the container
$ podman ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
01f5c8113e84 docker.io/library/busybox:latest top 2 seconds ago Up 3 seconds ago inspiring_galileo
-# Modify the image
+### Modify the image
$ podman commit --change CMD=/bin/bash inspiring_galileo busybox:latest
-# Auto-update the container
+### Auto-update the container
$ podman auto-update
container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
```
## SEE ALSO
-podman(1), podman-generate-systemd(1), podman-run(1), systemd.unit(5)
+**[podman(1)](podman.1.md)**, **[podman-generate-systemd(1)](podman-generate-systemd.1.md)**, **[podman-run(1)](podman-run.1.md)**, systemd.unit(5)
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index f173b24b4..c65fbccb9 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -111,7 +111,7 @@ given.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--cgroup-parent**=*path*
@@ -972,7 +972,7 @@ If you are using `useradd` within your build script, you should pass the
useradd to stop creating the lastlog file.
## SEE ALSO
-podman(1), buildah(1), containers-registries.conf(5), crun(8), runc(8), useradd(8), podman-ps(1), podman-rm(1)
+podman(1), buildah(1), containers-certs.d(5), containers-registries.conf(5), crun(8), runc(8), useradd(8), podman-ps(1), podman-rm(1)
## HISTORY
Aug 2020, Additional options and .dockerignore added by Dan Walsh `<dwalsh@redhat.com>`
diff --git a/docs/source/markdown/podman-commit.1.md b/docs/source/markdown/podman-commit.1.md
index 7485e9bd9..92574cdc5 100644
--- a/docs/source/markdown/podman-commit.1.md
+++ b/docs/source/markdown/podman-commit.1.md
@@ -9,34 +9,27 @@ podman\-commit - Create new image based on the changed container
**podman container commit** [*options*] *container* [*image*]
## DESCRIPTION
-**podman commit** creates an image based on a changed container. The author of the
-image can be set using the `--author` flag. Various image instructions can be
-configured with the `--change` flag and a commit message can be set using the
-`--message` flag. The container and its processes are paused while the image is
-committed. This minimizes the likelihood of data corruption when creating the new
-image. If this is not desired, the `--pause` flag can be set to false. When the commit
-is complete, Podman will print out the ID of the new image.
+**podman commit** creates an image based on a changed *container*. The author of the image can be set using the **--author** OPTION. Various image instructions can be configured with the **--change** OPTION and a commit message can be set using the **--message** OPTION. The *container* and its processes are paused while the image is committed. This minimizes the likelihood of data corruption when creating the new image. If this is not desired, the **--pause** OPTION can be set to *false*. When the commit is complete, Podman will print out the ID of the new image.
-If *image* does not begin with a registry name component, `localhost` will be added to the name.
-If *image* is not provided, the values for the `REPOSITORY` and `TAG` values of the created image will each be set to `<none>`.
+If `image` does not begin with a registry name component, `localhost` will be added to the name.
+If `image` is not provided, the values for the `REPOSITORY` and `TAG` values of the created image will each be set to `<none>`.
## OPTIONS
#### **--author**, **-a**=*author*
-Set the author for the committed image
+Set the author for the committed image.
#### **--change**, **-c**=*instruction*
Apply the following possible instructions to the created image:
**CMD** | **ENTRYPOINT** | **ENV** | **EXPOSE** | **LABEL** | **ONBUILD** | **STOPSIGNAL** | **USER** | **VOLUME** | **WORKDIR**
-Can be set multiple times
+Can be set multiple times.
-#### **--format**, **-f**=*format*
+#### **--format**, **-f**=**oci** | *docker*
-Set the format of the image manifest and metadata. The currently supported formats are _oci_ and _docker_. If
-not specifically set, the default format used is _oci_.
+Set the format of the image manifest and metadata. The currently supported formats are **oci** and *docker*. The default is **oci**.
#### **--iidfile**=*ImageIDfile*
@@ -44,23 +37,24 @@ Write the image ID to the file.
#### **--include-volumes**
-Include in the committed image any volumes added to the container by the `--volume` or `--mount` options to the `podman create` and `podman run` commands.
+Include in the committed image any volumes added to the container by the **--volume** or **--mount** OPTIONS to the **[podman create](podman-create.1.md)** and **[podman run](podman-run.1.md)** commands. The default is **false**.
#### **--message**, **-m**=*message*
-Set commit message for committed image. The message field is not supported in _oci_ format.
+Set commit message for committed image.\
+*IMPORTANT: The message field is not supported in `oci` format.*
#### **--pause**, **-p**
-Pause the container when creating an image
+Pause the container when creating an image. The default is **false**.
#### **--quiet**, **-q**
-Suppress output
+Suppresses output. The default is **false**.
## EXAMPLES
-### Create image from container with entrypoint and label
+Create image from container with entrypoint and label
```
$ podman commit --change CMD=/bin/bash --change ENTRYPOINT=/bin/sh --change "LABEL blue=image" reverent_golick image-committed
Getting image source signatures
@@ -73,39 +67,39 @@ Storing signatures
e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
-### Create image from container with commit message
+Create image from container with commit message
```
$ podman commit -q --message "committing container to image"
reverent_golick image-committed
-e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ```
+e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
-### Create image from container with author
+Create image from container with author
```
$ podman commit -q --author "firstName lastName" reverent_golick image-committed
e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
-### Pause a running container while creating the image
+Pause a running container while creating the image
```
$ podman commit -q --pause=true containerID image-committed
e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
-### Create an image from a container with a default image tag
+Create an image from a container with a default image tag
```
$ podman commit containerID
e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
-### Create an image from container with default required capabilities are SETUID and SETGID
+Create an image from a container with the default required capabilities SETUID and SETGID
```
$ podman commit -q --change LABEL=io.containers.capabilities=setuid,setgid epic_nobel privimage
400d31a3f36dca751435e80a0e16da4859beb51ff84670ce6bdc5edb30b94066
```
## SEE ALSO
-podman(1), podman-run(1), podman-create(1)
+**[podman(1)](podman.1.md)**, **[podman-run(1)](podman-run.1.md)**, **[podman-create(1)](podman-create.1.md)**
## HISTORY
December 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-container-checkpoint.1.md b/docs/source/markdown/podman-container-checkpoint.1.md
index 46b6cb646..271a44c9d 100644
--- a/docs/source/markdown/podman-container-checkpoint.1.md
+++ b/docs/source/markdown/podman-container-checkpoint.1.md
@@ -10,31 +10,25 @@ podman\-container\-checkpoint - Checkpoints one or more running containers
Checkpoints all the processes in one or more containers. You may use container IDs or names as input.
## OPTIONS
-#### **--keep**, **-k**
-
-Keep all temporary log and statistics files created by CRIU during checkpointing. These files
-are not deleted if checkpointing fails for further debugging. If checkpointing succeeds these
-files are theoretically not needed, but if these files are needed Podman can keep the files
-for further analysis.
-
#### **--all**, **-a**
Checkpoint all running containers.
-#### **--latest**, **-l**
-
-Instead of providing the container name or ID, checkpoint the last created container. (This option is not available with the remote Podman client)
-
-#### **--leave-running**, **-R**
+#### **--compress**, **-c**
-Leave the container running after checkpointing instead of stopping it.
+Specify the compression algorithm used for the checkpoint archive created
+with the **--export, -e** option. Possible algorithms are *gzip*, *none*
+and *zstd*. If no compression algorithm is specified, Podman will use
+*zstd*.
-#### **--tcp-established**
+One possible reason to use *none* is to enable faster creation of checkpoint
+archives, since no time is spent compressing the checkpoint data.
-Checkpoint a container with established TCP connections. If the checkpoint
-image contains established TCP connections, this options is required during
-restore. Defaults to not checkpointing containers with established TCP
-connections.
+```
+# podman container checkpoint -l --compress=none --export=dump.tar
+# podman container checkpoint -l --compress=gzip --export=dump.tar.gz
+```
#### **--export**, **-e**
@@ -56,11 +50,33 @@ This option must be used in combination with the **--export, -e** option.
When this option is specified, the content of volumes associated with
the container will not be included into the checkpoint tar.gz file.
+#### **--keep**, **-k**
+
+Keep all temporary log and statistics files created by CRIU during checkpointing. These files
+are not deleted if checkpointing fails for further debugging. If checkpointing succeeds these
+files are theoretically not needed, but if these files are needed Podman can keep the files
+for further analysis.
+
+#### **--latest**, **-l**
+
+Instead of providing the container name or ID, checkpoint the last created container. (This option is not available with the remote Podman client)
+
+#### **--leave-running**, **-R**
+
+Leave the container running after checkpointing instead of stopping it.
+
#### **--pre-checkpoint**, **-P**
Dump the container's memory information only, leaving the container running. Later
operations will supersede prior dumps. It only works on runc 1.0-rc3 or higher.
+#### **--tcp-established**
+
+Checkpoint a container with established TCP connections. If the checkpoint
+image contains established TCP connections, this option is required during
+restore. Defaults to not checkpointing containers with established TCP
+connections.
+
#### **--with-previous**
Check out the container with previous criu image files in pre-dump. It only works
diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index ef8722279..82bf76d1e 100644
--- a/docs/source/markdown/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -95,6 +95,19 @@ This option must be used in combination with the **--import, -i** option.
When restoring containers from a checkpoint tar.gz file with this option,
the content of associated volumes will not be restored.
+#### **--publish**, **-p**
+
+Replaces the ports that the container publishes, as configured during the
+initial container start, with a new set of port forwarding rules.
+
+```
+# podman run --rm -p 2345:80 -d webserver
+# podman container checkpoint -l --export=dump.tar
+# podman container restore -p 5432:8080 --import=dump.tar
+```
+
+For more details, please see **podman run --publish**.
+
## EXAMPLE
podman container restore mywebserver
@@ -104,7 +117,7 @@ podman container restore 860a4b23
podman container restore --import-previous pre-checkpoint.tar.gz --import checkpoint.tar.gz
## SEE ALSO
-podman(1), podman-container-checkpoint(1)
+podman(1), podman-container-checkpoint(1), podman-run(1)
## HISTORY
September 2018, Originally compiled by Adrian Reber <areber@redhat.com>
diff --git a/docs/source/markdown/podman-container-runlabel.1.md b/docs/source/markdown/podman-container-runlabel.1.md
index aee849cbd..e343a12fe 100644
--- a/docs/source/markdown/podman-container-runlabel.1.md
+++ b/docs/source/markdown/podman-container-runlabel.1.md
@@ -57,7 +57,7 @@ The runlabel command will not execute if --display is specified.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--creds**=*[username[:password]]*
@@ -105,7 +105,7 @@ $ sudo podman container runlabel --display run foobar
```
## SEE ALSO
-podman(1)
+podman(1), containers-certs.d(5)
## HISTORY
September 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 3565a42bc..28c455b78 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -75,7 +75,7 @@ Description=Podman container-de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd34
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/container/storage
+RequiresMountsFor=/var/run/container/storage
[Service]
Restart=always
@@ -104,7 +104,7 @@ Description=Podman container-busy_moser.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/container/storage
+RequiresMountsFor=/var/run/container/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
@@ -146,7 +146,7 @@ Requires=container-amazing_chandrasekhar.service container-jolly_shtern.service
Before=container-amazing_chandrasekhar.service container-jolly_shtern.service
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/container/storage
+RequiresMountsFor=/var/run/container/storage
[Service]
Restart=on-failure
diff --git a/docs/source/markdown/podman-image-sign.1.md b/docs/source/markdown/podman-image-sign.1.md
index b9addc062..616aab8ff 100644
--- a/docs/source/markdown/podman-image-sign.1.md
+++ b/docs/source/markdown/podman-image-sign.1.md
@@ -26,7 +26,7 @@ Sign all the manifests of the multi-architecture image (default false).
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--directory**, **-d**=*dir*
@@ -61,7 +61,7 @@ the signature will be written into sub-directories of
the signature will be 'read' from that same location on a pull-related function.
## SEE ALSO
-containers-registries.d(5)
+containers-certs.d(5), containers-registries.d(5)
## HISTORY
November 2018, Originally compiled by Qi Wang (qiwan at redhat dot com)
diff --git a/docs/source/markdown/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index 274869042..88d025c70 100644
--- a/docs/source/markdown/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
@@ -54,7 +54,7 @@ Return the logged-in user for the registry. Return error if no login is found.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--tls-verify**=*true|false*
@@ -108,7 +108,7 @@ Login Succeeded!
```
## SEE ALSO
-podman(1), podman-logout(1), containers-auth.json(5), containers-registries.conf(5)
+podman(1), podman-logout(1), containers-auth.json(5), containers-certs.d(5), containers-registries.conf(5)
## HISTORY
August 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-manifest-add.1.md b/docs/source/markdown/podman-manifest-add.1.md
index 59d5e5d2c..376301589 100644
--- a/docs/source/markdown/podman-manifest-add.1.md
+++ b/docs/source/markdown/podman-manifest-add.1.md
@@ -44,7 +44,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--creds**=*creds*
@@ -93,6 +93,28 @@ architecture value, but which expect different versions of its instruction set.
$ podman manifest add mylist:v1.11 containers-storage:quay.io/username/myimage
+ **dir:**_path_
+ An existing local directory _path_ storing the manifest, layer tarballs, and signatures as individual files. This
+ is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
+
+ $ podman manifest add dir:/tmp/myimage
+
+ **docker-archive:**_path_[**:**_docker-reference_]
+ An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a
+ file, and it must not contain a digest.
+
+ $ podman manifest add docker-archive:/tmp/myimage
+
+ **docker-daemon:**_docker-reference_
+ An image in _docker-reference_ format stored in the docker daemon internal storage. The _docker-reference_ can also be an image ID (docker-daemon:algo:digest).
+
+ $ sudo podman manifest add docker-daemon:docker.io/library/myimage:33
+
+ **oci-archive:**_path_**:**_tag_
+ An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
+
+ $ podman manifest add oci-archive:/tmp/myimage
+
## EXAMPLE
```
@@ -110,4 +132,4 @@ podman manifest add --arch arm64 --variant v8 mylist:v1.11 docker://71c201d10fff
```
## SEE ALSO
-podman(1), podman-manifest(1), podman-manifest-create(1), podman-manifest-inspect(1), podman-manifest-push(1), podman-manifest-remove(1), podman-rmi(1)
+podman(1), podman-manifest(1), podman-manifest-create(1), podman-manifest-inspect(1), podman-manifest-push(1), podman-manifest-remove(1), podman-rmi(1), containers-certs.d(5)
diff --git a/docs/source/markdown/podman-manifest-push.1.md b/docs/source/markdown/podman-manifest-push.1.md
index 5097ada64..2cf1cc375 100644
--- a/docs/source/markdown/podman-manifest-push.1.md
+++ b/docs/source/markdown/podman-manifest-push.1.md
@@ -30,7 +30,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--creds**=*creds*
@@ -107,4 +107,4 @@ podman manifest push mylist:v1.11 docker://registry.example.org/mylist:v1.11
```
## SEE ALSO
-podman(1), podman-manifest(1), podman-manifest-add(1), podman-manifest-create(1), podman-manifest-inspect(1), podman-manifest-remove(1), podman-rmi(1)
+podman(1), podman-manifest(1), podman-manifest-add(1), podman-manifest-create(1), podman-manifest-inspect(1), podman-manifest-remove(1), podman-rmi(1), containers-certs.d(5)
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index 98b303ae7..ad5ae7e4c 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -48,7 +48,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--configmap**=*path*
@@ -132,7 +132,7 @@ $ podman play kube demo.yml --network cni1,cni2
Please take into account that CNI networks must be created first using podman-network-create(1).
## SEE ALSO
-podman(1), podman-container(1), podman-pod(1), podman-generate-kube(1), podman-play(1), podman-network-create(1)
+podman(1), podman-container(1), podman-pod(1), podman-generate-kube(1), podman-play(1), podman-network-create(1), containers-certs.d(5)
## HISTORY
December 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/source/markdown/podman-pull.1.md b/docs/source/markdown/podman-pull.1.md
index 9b42d07ac..54f990601 100644
--- a/docs/source/markdown/podman-pull.1.md
+++ b/docs/source/markdown/podman-pull.1.md
@@ -80,7 +80,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--creds**=*[username[:password]]*
@@ -217,7 +217,7 @@ registries.conf is the configuration file which specifies which container regist
NOTE: Use the environment variable `TMPDIR` to change the temporary storage location of downloaded container images. Podman defaults to use `/var/tmp`.
## SEE ALSO
-podman(1), podman-push(1), podman-login(1), containers-registries.conf(5)
+podman(1), podman-push(1), podman-login(1), containers-certs.d(5), containers-registries.conf(5)
## HISTORY
July 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-push.1.md b/docs/source/markdown/podman-push.1.md
index f42ee1020..e26d73891 100644
--- a/docs/source/markdown/podman-push.1.md
+++ b/docs/source/markdown/podman-push.1.md
@@ -71,7 +71,7 @@ value can be entered. The password is entered without echo.
#### **--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
-Default certificates directory is _/etc/containers/certs.d_. (This option is not available with the remote Podman client)
+Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client)
#### **--compress**
@@ -161,4 +161,4 @@ Storing signatures
```
## SEE ALSO
-podman(1), podman-pull(1), podman-login(1)
+podman(1), podman-pull(1), podman-login(1), containers-certs.d(5)
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 2c8be73c2..dfb026de1 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -30,6 +30,10 @@ Note: The default systemd unit files (system and user) change the log-level opti
The time until the session expires in _seconds_. The default is 5
seconds. A value of `0` means no timeout, therefore the session will not expire.
+#### **--cors**
+
+CORS headers to inject into the HTTP response. The default value is an empty string, which disables CORS headers.
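A minimal sketch, assuming the service is otherwise started with its defaults; the value `*` is only an illustrative CORS header allowing any origin:

```
$ podman system service --cors="*"
```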
+
#### **--help**, **-h**
Print usage statement.
diff --git a/docs/tutorials/mac_experimental.md b/docs/tutorials/mac_experimental.md
new file mode 100644
index 000000000..8df64dc99
--- /dev/null
+++ b/docs/tutorials/mac_experimental.md
@@ -0,0 +1,99 @@
+# Using podman-machine on MacOS (x86_64 and Apple silicon)
+
+## Setup
+
+You must obtain a compressed tarball that contains the following:
+* a qcow image
+* a podman binary
+* a gvproxy binary
+
+You must also have installed brew prior to following this process. See https://brew.sh/ for
+installation instructions.
+
+Note: If your user has admin rights, you can ignore the use of `sudo` in these instructions.
+
+
+1. Install qemu from brew to obtain the required runtime dependencies.
+
+ ```
+ brew install qemu
+ ```
+
+2. If you are running MacOS on the Intel architecture, you can skip to step 8.
+3. Uninstall the brew package
+
+ ```
+ brew uninstall qemu
+ ```
+
+4. Get upstream qemu source code.
+
+ ```
+ git clone https://github.com/qemu/qemu
+ ```
+
+5. Apply patches that have not been merged into upstream qemu.
+
+ ```
+ cd qemu
+ git config user.name "YOUR_NAME"
+ git config user.email johndoe@example.com
+ git checkout v5.2.0
+ curl https://patchwork.kernel.org/series/418581/mbox/ | git am --exclude=MAINTAINERS
+ curl -L https://gist.github.com/citruz/9896cd6fb63288ac95f81716756cb9aa/raw/2d613e9a003b28dfe688f33055706d3873025a40/xcode-12-4.patch | git apply -
+ ```
+
+6. Install qemu build dependencies
+
+ ```
+ brew install libffi gettext pkg-config autoconf automake pixman ninja make
+ ```
+
+7. Configure, compile, and install qemu
+ ```
+ mkdir build
+ cd build
+ ../configure --target-list=aarch64-softmmu --disable-gnutls
+ gmake -j8
+ sudo gmake install
+ ```
+
+
+8. Uncompress and place the provided binaries into the filesystem
+
+ **Note**: In the following instructions, you need to know the name of the compressed file
+that you were given. It will be used in two of the steps below.
+
+ ```
+ cd ~
+ tar xvf `compressed_file_ending_in_xz`
+ sudo cp -v `unpacked_directory`/{gvproxy,podman} /usr/local/bin
+ ```
+
+9. Sign all binaries
+
+ If you have a Mac with Apple Silicon, issue the following command:
+ ```
+ sudo codesign --entitlements ~/qemu/accel/hvf/entitlements.plist --force -s - /usr/local/bin/qemu-* /usr/local/bin/gvproxy /usr/local/bin/podman
+ ```
+
+ If you have a Mac with an Intel processor, issue the following command:
+
+ ```
+ echo '<?xml version="1.0" encoding="utf-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0"> <dict> <key>com.apple.security.hypervisor</key> <true/> </dict> </plist>
+ ' > ~/entitlements.plist
+ sudo codesign --entitlements ~/entitlements.plist --force -s - /usr/local/bin/qemu-* /usr/local/bin/gvproxy /usr/local/bin/podman
+ ```
+
+
+## Test podman
+
+1. podman machine init --image-path /path/to/image
+2. podman machine start
+3. podman images
+4. git clone http://github.com/baude/alpine_nginx && cd alpine_nginx
+5. podman build -t alpine_nginx .
+6. podman run -dt -p 9999:80 alpine_nginx
+7. curl http://localhost:9999
diff --git a/docs/tutorials/podman-go-bindings.md b/docs/tutorials/podman-go-bindings.md
index a952f2dc0..2bbf4e5de 100644
--- a/docs/tutorials/podman-go-bindings.md
+++ b/docs/tutorials/podman-go-bindings.md
@@ -174,7 +174,7 @@ This binding takes three arguments:
```Go
// Pull Busybox image (Sample 1)
fmt.Println("Pulling Busybox image...")
- _, err = images.Pull(connText, "docker.io/busybox", entities.ImagePullOptions{})
+ _, err = images.Pull(connText, "docker.io/busybox", &images.PullOptions{})
if err != nil {
fmt.Println(err)
os.Exit(1)
@@ -183,7 +183,7 @@ This binding takes three arguments:
// Pull Fedora image (Sample 2)
rawImage := "registry.fedoraproject.org/fedora:latest"
fmt.Println("Pulling Fedora image...")
- _, err = images.Pull(connText, rawImage, entities.ImagePullOptions{})
+ _, err = images.Pull(connText, rawImage, &images.PullOptions{})
if err != nil {
fmt.Println(err)
os.Exit(1)
@@ -229,7 +229,7 @@ This binding takes three arguments:
```Go
// List images
- imageSummary, err := images.List(connText, nil, nil)
+ imageSummary, err := images.List(connText, &images.ListOptions{})
if err != nil {
fmt.Println(err)
os.Exit(1)
@@ -287,7 +287,7 @@ containers.Wait() takes three arguments:
// Container create
s := specgen.NewSpecGenerator(rawImage, false)
s.Terminal = true
- r, err := containers.CreateWithSpec(connText, s)
+ r, err := containers.CreateWithSpec(connText, s, nil)
if err != nil {
fmt.Println(err)
os.Exit(1)
@@ -302,7 +302,7 @@ containers.Wait() takes three arguments:
}
running := define.ContainerStateRunning
- _, err = containers.Wait(connText, r.ID, &running)
+ _, err = containers.Wait(connText, r.ID, &containers.WaitOptions{Condition: []define.ContainerStatus{running}})
if err != nil {
fmt.Println(err)
os.Exit(1)
@@ -346,7 +346,7 @@ containers.List() takes seven arguments:
```Go
// Container list
var latestContainers = 1
- containerLatestList, err := containers.List(connText, nil, nil, &latestContainers, nil, nil, nil)
+ containerLatestList, err := containers.List(connText, &containers.ListOptions{Last: &latestContainers})
if err != nil {
fmt.Println(err)
os.Exit(1)
diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md
index e39d804a6..889947397 100644
--- a/docs/tutorials/remote_client.md
+++ b/docs/tutorials/remote_client.md
@@ -108,5 +108,9 @@ podman-remote system connection --help
You can use the Podman remote clients to manage your containers running on a Linux server. The communication between client and server relies heavily on SSH connections and the use of SSH keys are encouraged. Once you have Podman installed on your remote client, you should set up a connection using `podman-remote system connection add` which will then be used by subsequent Podman commands.
+## Troubleshooting
+
+See the [Troubleshooting](../../troubleshooting.md) document if you run into issues.
+
## History
Adapted from the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md)
diff --git a/go.mod b/go.mod
index 4c320f7c5..66a27f0ef 100644
--- a/go.mod
+++ b/go.mod
@@ -12,12 +12,12 @@ require (
github.com/containernetworking/cni v0.8.1
github.com/containernetworking/plugins v0.9.1
github.com/containers/buildah v1.21.0
- github.com/containers/common v0.38.4
+ github.com/containers/common v0.39.1-0.20210527140106-e5800a20386a
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.12.0
github.com/containers/ocicrypt v1.1.1
github.com/containers/psgo v1.5.2
- github.com/containers/storage v1.31.2
+ github.com/containers/storage v1.32.1
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf
@@ -25,7 +25,7 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/digitalocean/go-qemu v0.0.0-20210209191958-152a1535e49f
github.com/docker/distribution v2.7.1+incompatible
- github.com/docker/docker v20.10.6+incompatible
+ github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc
github.com/docker/go-units v0.4.0
@@ -42,14 +42,14 @@ require (
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635
github.com/mrunalp/fileutils v0.5.0
- github.com/onsi/ginkgo v1.16.2
- github.com/onsi/gomega v1.12.0
+ github.com/onsi/ginkgo v1.16.4
+ github.com/onsi/gomega v1.13.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc95
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.8.1
+ github.com/opencontainers/selinux v1.8.2
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/rootless-containers/rootlesskit v0.14.2
@@ -58,10 +58,10 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
- github.com/uber/jaeger-client-go v2.28.0+incompatible
+ github.com/uber/jaeger-client-go v2.29.1+incompatible
github.com/vbauerster/mpb/v6 v6.0.4
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
- go.etcd.io/bbolt v1.3.5
+ go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015
diff --git a/go.sum b/go.sum
index 39827f61a..af13ed423 100644
--- a/go.sum
+++ b/go.sum
@@ -90,6 +90,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
@@ -218,8 +220,9 @@ github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containers/buildah v1.21.0 h1:LuwuqRPjan3X3AIdGwfkEkqMgmrDMNpQznFqNdHgCz8=
github.com/containers/buildah v1.21.0/go.mod h1:yPdlpVd93T+i91yGxrJbW1YOWrqN64j5ZhHOZmHUejs=
-github.com/containers/common v0.38.4 h1:WYv4R6Sw1qiOPZtBNbKglrmisXdPcq3fZ3bGy4prrjo=
github.com/containers/common v0.38.4/go.mod h1:egfpX/Y3+19Dz4Wa1eRZDdgzoEOeneieF9CQppKzLBg=
+github.com/containers/common v0.39.1-0.20210527140106-e5800a20386a h1:XzYOUf7qjgVJ59YGqAzehlbT63EgjUJhMnfhsPSSJV0=
+github.com/containers/common v0.39.1-0.20210527140106-e5800a20386a/go.mod h1:CxHAf4iQOZZ8nASIjMdYHHRyA8dMR4tINSS7WQWlv90=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.12.0 h1:1hNS2QkzFQ4lH3GYQLyAXB0acRMhS1Ubm6oV++8vw4w=
@@ -235,8 +238,9 @@ github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzP
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E=
github.com/containers/storage v1.31.1/go.mod h1:IFEf+yRTS0pvCGQt2tBv1Kzz2XUSPvED6uFBmWG7V/E=
-github.com/containers/storage v1.31.2 h1:wWi7OsNtHUydGdK0EpQiK94MfQNj5qK2GtxNLoj4tU4=
-github.com/containers/storage v1.31.2/go.mod h1:J3q772EVbN9vgqoN/dkvInKnp4xK9ZXm7wHNfuiIDgE=
+github.com/containers/storage v1.32.0/go.mod h1:J3q772EVbN9vgqoN/dkvInKnp4xK9ZXm7wHNfuiIDgE=
+github.com/containers/storage v1.32.1 h1:JgvHY5dokiff+Ee4TdvPYO++Oq2BAave5DmyPetH2iU=
+github.com/containers/storage v1.32.1/go.mod h1:do6oIF71kfkVS3CPUZr+6He94fIaj6pzF8ywevPuuOw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -293,8 +297,9 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.3-0.20210216175712-646072ed6524+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ=
github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -530,8 +535,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -645,8 +651,9 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
-github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -655,8 +662,9 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
-github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8=
github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
+github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -691,8 +699,9 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA=
github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
@@ -823,8 +832,8 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/u-root/u-root v7.0.0+incompatible/go.mod h1:RYkpo8pTHrNjW08opNd/U6p/RJE7K0D8fXO0d47+3YY=
-github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI=
-github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=
+github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -847,7 +856,6 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
@@ -867,8 +875,9 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPS
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -1049,6 +1058,7 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/libpod/container.go b/libpod/container.go
index 591cf9bc5..c6f0cd618 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/lock"
- "github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -1168,7 +1167,7 @@ func (c *Container) Networks() ([]string, bool, error) {
func (c *Container) networks() ([]string, bool, error) {
networks, err := c.runtime.state.GetNetworks(c)
if err != nil && errors.Cause(err) == define.ErrNoSuchNetwork {
- if len(c.config.Networks) == 0 && !rootless.IsRootless() {
+ if len(c.config.Networks) == 0 && c.config.NetMode.IsBridge() {
return []string{c.runtime.netPlugin.GetDefaultNetworkName()}, true, nil
}
return c.config.Networks, false, nil
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 4ccb240e7..b75d0b41d 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/signal"
+ "github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -776,6 +777,9 @@ type ContainerCheckpointOptions struct {
// ImportPrevious tells the API to restore container with two
// images. One is TargetFile, the other is ImportPrevious.
ImportPrevious string
+ // Compression tells the API which compression to use for
+ // the exported checkpoint archive.
+ Compression archive.Compression
}
// Checkpoint checkpoints a container
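
Not part of the diff: a minimal sketch of how a caller could use the new Compression field, assuming an existing *libpod.Container obtained from a runtime; archive.Gzip is one of the compression constants provided by containers/storage.

package example

import (
	"context"

	"github.com/containers/podman/v3/libpod"
	"github.com/containers/storage/pkg/archive"
)

// exportCheckpoint is a hypothetical helper: it only shows how the new
// Compression field is wired into the existing checkpoint options.
func exportCheckpoint(ctx context.Context, ctr *libpod.Container) error {
	opts := libpod.ContainerCheckpointOptions{
		TargetFile:  "/tmp/checkpoint.tar.gz", // where the exported archive lands
		Compression: archive.Gzip,             // the value handed to archive.TarWithOptions above
	}
	return ctr.Checkpoint(ctx, opts)
}
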
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 1b2f5a496..a3acc3198 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -985,7 +985,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
}
input, err := archive.TarWithOptions(c.bundlePath(), &archive.TarOptions{
- Compression: archive.Gzip,
+ Compression: options.Compression,
IncludeSourceDir: true,
IncludeFiles: includeFiles,
})
@@ -1668,17 +1668,16 @@ func (c *Container) generateResolvConf() (string, error) {
return "", err
}
- // Ensure that the container's /etc/resolv.conf is compatible with its
- // network configuration.
- // TODO: set ipv6 enable bool more sanely
- resolv, err := resolvconf.FilterResolvDNS(contents, true, c.config.CreateNetNS)
- if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
+ ipv6 := false
	// Check if CNI gave back any DNS servers for us to add in
cniResponse := c.state.NetworkStatus
for _, i := range cniResponse {
+ for _, ip := range i.IPs {
+ // Note: only using To16() does not work since it also returns a valid IP for IPv4
+ if ip.Address.IP.To4() == nil && ip.Address.IP.To16() != nil {
+ ipv6 = true
+ }
+ }
if i.DNS.Nameservers != nil {
cniNameServers = append(cniNameServers, i.DNS.Nameservers...)
logrus.Debugf("adding nameserver(s) from cni response of '%q'", i.DNS.Nameservers)
@@ -1689,6 +1688,25 @@ func (c *Container) generateResolvConf() (string, error) {
}
}
+ if c.config.NetMode.IsSlirp4netns() {
+ ctrNetworkSlipOpts := []string{}
+ if c.config.NetworkOptions != nil {
+ ctrNetworkSlipOpts = append(ctrNetworkSlipOpts, c.config.NetworkOptions["slirp4netns"]...)
+ }
+ slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlipOpts)
+ if err != nil {
+ return "", err
+ }
+ ipv6 = slirpOpts.enableIPv6
+ }
+
+ // Ensure that the container's /etc/resolv.conf is compatible with its
+ // network configuration.
+ resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, c.config.CreateNetNS)
+ if err != nil {
+ return "", errors.Wrapf(err, "error parsing host resolv.conf")
+ }
+
dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers))
for _, i := range c.runtime.config.Containers.DNSServers {
result := net.ParseIP(i)
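
As an aside (not from the diff), the To4()/To16() check above can be exercised on its own; this standalone sketch shows why To16() alone would not be enough:

package main

import (
	"fmt"
	"net"
)

// isIPv6 mirrors the check above: To16() alone is not sufficient because
// it also returns a non-nil slice for IPv4 addresses.
func isIPv6(ip net.IP) bool {
	return ip.To4() == nil && ip.To16() != nil
}

func main() {
	fmt.Println(isIPv6(net.ParseIP("10.88.0.2")))         // false
	fmt.Println(isIPv6(net.ParseIP("fd00:dead:beef::2"))) // true
}
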
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index ec4fa9724..892ee34e3 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -6,14 +6,12 @@ package libpod
import (
"context"
"fmt"
- "io"
- "math"
"strings"
"time"
- "github.com/containers/podman/v3/libpod/define"
+ "github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/libpod/logs"
- journal "github.com/coreos/go-systemd/v22/sdjournal"
+ "github.com/coreos/go-systemd/v22/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,122 +22,187 @@ const (
// journaldLogErr is the journald priority signifying stderr
journaldLogErr = "3"
-
- // bufLen is the length of the buffer to read from a k8s-file
- // formatted log line
- // let's set it as 2k just to be safe if k8s-file format ever changes
- bufLen = 16384
)
func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
- var config journal.JournalReaderConfig
- if options.Tail < 0 {
- config.NumFromTail = 0
- } else if options.Tail == 0 {
- config.NumFromTail = math.MaxUint64
- } else {
- config.NumFromTail = uint64(options.Tail)
+ journal, err := sdjournal.NewJournal()
+ if err != nil {
+ return err
}
- if options.Multi {
- config.Formatter = journalFormatterWithID
- } else {
- config.Formatter = journalFormatter
- }
- defaultTime := time.Time{}
- if options.Since != defaultTime {
- // coreos/go-systemd/sdjournal doesn't correctly handle requests for data in the future
- // return nothing instead of falsely printing
- if time.Now().Before(options.Since) {
- return nil
- }
- // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past
- config.Since = -time.Since(options.Since)
+ // While logs are written to the `logChannel`, we inspect each event
+ // and stop once the container has died. Having logs and events in one
+ // stream prevents a race condition that we faced in #10323.
+
+ // Add the filters for events.
+ match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ }
+ match = sdjournal.Match{Field: "PODMAN_ID", Value: c.ID()}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
}
- config.Matches = append(config.Matches, journal.Match{
- Field: "CONTAINER_ID_FULL",
- Value: c.ID(),
- })
- options.WaitGroup.Add(1)
- r, err := journal.NewJournalReader(config)
- if err != nil {
+ // Add the filter for logs. Note the disjunction so that we match
+ // either the events or the logs.
+ if err := journal.AddDisjunction(); err != nil {
+ return errors.Wrap(err, "adding filter disjunction to journald logger")
+ }
+ match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: c.ID()}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ }
+
+ if err := journal.SeekHead(); err != nil {
return err
}
- if r == nil {
- return errors.Errorf("journal reader creation failed")
+ // API requires Next() immediately after SeekHead().
+ if _, err := journal.Next(); err != nil {
+ return errors.Wrap(err, "initial journal cursor")
}
- if options.Tail == math.MaxInt64 {
- r.Rewind()
+
+ // API requires a next|prev before getting a cursor.
+ if _, err := journal.Previous(); err != nil {
+ return errors.Wrap(err, "initial journal cursor")
}
- state, err := c.State()
- if err != nil {
- return err
+
+ // Note that the initial cursor may not yet be ready, so we'll retry
+ // a few times with a short backoff.
+ var cursor string
+ var cursorError error
+ for i := 1; i <= 3; i++ {
+ cursor, cursorError = journal.GetCursor()
+ if cursorError != nil {
+ continue
+ }
+ time.Sleep(time.Duration(i*100) * time.Millisecond)
+ break
+ }
+ if cursorError != nil {
+ return errors.Wrap(cursorError, "initial journal cursor")
+ }
+
+ // We need the container's events in the same journal to guarantee
+ // consistency, see #10323.
+ if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" {
+ return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
}
- if options.Follow && state == define.ContainerStateRunning {
- go func() {
- done := make(chan bool)
- until := make(chan time.Time)
- go func() {
- select {
- case <-ctx.Done():
- until <- time.Time{}
- case <-done:
- // nothing to do anymore
+ options.WaitGroup.Add(1)
+ go func() {
+ defer func() {
+ options.WaitGroup.Done()
+ if err := journal.Close(); err != nil {
+ logrus.Errorf("Unable to close journal: %v", err)
+ }
+ }()
+
+ afterTimeStamp := false // needed for options.Since
+ tailQueue := []*logs.LogLine{} // needed for options.Tail
+ doTail := options.Tail > 0
+ for {
+ select {
+ case <-ctx.Done():
+ // Remote client may have closed/lost the connection.
+ return
+ default:
+ // Fallthrough
+ }
+
+ if _, err := journal.Next(); err != nil {
+ logrus.Errorf("Failed to move journal cursor to next entry: %v", err)
+ return
+ }
+ latestCursor, err := journal.GetCursor()
+ if err != nil {
+ logrus.Errorf("Failed to get journal cursor: %v", err)
+ return
+ }
+
+ // Hit the end of the journal.
+ if cursor == latestCursor {
+ if doTail {
+ // Flush *once* we hit the end of the journal.
+ startIndex := int64(len(tailQueue)-1) - options.Tail
+ if startIndex < 0 {
+ startIndex = 0
+ }
+ for i := startIndex; i < int64(len(tailQueue)); i++ {
+ logChannel <- tailQueue[i]
+ }
+ tailQueue = nil
+ doTail = false
+ }
+ // Unless we follow, quit.
+ if !options.Follow {
+ return
}
- }()
- go func() {
- // FIXME (#10323): we are facing a terrible
- // race condition here. At the time the
- // container dies and `c.Wait()` has returned,
- // we may not have received all journald logs.
- // So far there is no other way than waiting
- // for a second. Ultimately, `r.Follow` is
- // racy and we may have to implement our custom
- // logic here.
- c.Wait(ctx)
- time.Sleep(time.Second)
- until <- time.Time{}
- }()
- follower := journaldFollowBuffer{logChannel, options.Multi}
- err := r.Follow(until, follower)
+ // Sleep until something's happening on the journal.
+ journal.Wait(sdjournal.IndefiniteWait)
+ continue
+ }
+ cursor = latestCursor
+
+ entry, err := journal.GetEntry()
if err != nil {
- logrus.Debugf(err.Error())
+ logrus.Errorf("Failed to get journal entry: %v", err)
+ return
}
- r.Close()
- options.WaitGroup.Done()
- done <- true
- return
- }()
- return nil
- }
- go func() {
- bytes := make([]byte, bufLen)
- // /me complains about no do-while in go
- ec, err := r.Read(bytes)
- for ec != 0 && err == nil {
- // because we are reusing bytes, we need to make
- // sure the old data doesn't get into the new line
- bytestr := string(bytes[:ec])
- logLine, err2 := logs.NewJournaldLogLine(bytestr, options.Multi)
- if err2 != nil {
- logrus.Error(err2)
+ if !afterTimeStamp {
+ entryTime := time.Unix(0, int64(entry.RealtimeTimestamp)*int64(time.Microsecond))
+ if entryTime.Before(options.Since) {
+ continue
+ }
+ afterTimeStamp = true
+ }
+
+ // If we're reading an event and the container exited/died,
+ // then we're done and can return.
+ event, ok := entry.Fields["PODMAN_EVENT"]
+ if ok {
+ status, err := events.StringToStatus(event)
+ if err != nil {
+ logrus.Errorf("Failed to translate event: %v", err)
+ return
+ }
+ if status == events.Exited {
+ return
+ }
+ continue
+ }
+
+ var message string
+ var formatError error
+
+ if options.Multi {
+ message, formatError = journalFormatterWithID(entry)
+ } else {
+ message, formatError = journalFormatter(entry)
+ }
+
+ if formatError != nil {
+ logrus.Errorf("Failed to parse journald log entry: %v", formatError)
+ return
+ }
+
+ logLine, err := logs.NewJournaldLogLine(message, options.Multi)
+ if err != nil {
+ logrus.Errorf("Failed to parse log line: %v", err)
+ return
+ }
+ if doTail {
+ tailQueue = append(tailQueue, logLine)
continue
}
logChannel <- logLine
- ec, err = r.Read(bytes)
}
- if err != nil && err != io.EOF {
- logrus.Error(err)
- }
- r.Close()
- options.WaitGroup.Done()
}()
+
return nil
}
-func journalFormatterWithID(entry *journal.JournalEntry) (string, error) {
+func journalFormatterWithID(entry *sdjournal.JournalEntry) (string, error) {
output, err := formatterPrefix(entry)
if err != nil {
return "", err
@@ -162,7 +225,7 @@ func journalFormatterWithID(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func journalFormatter(entry *journal.JournalEntry) (string, error) {
+func journalFormatter(entry *sdjournal.JournalEntry) (string, error) {
output, err := formatterPrefix(entry)
if err != nil {
return "", err
@@ -176,7 +239,7 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func formatterPrefix(entry *journal.JournalEntry) (string, error) {
+func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
usec := entry.RealtimeTimestamp
tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat)
output := fmt.Sprintf("%s ", tsString)
@@ -202,7 +265,7 @@ func formatterPrefix(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func formatterMessage(entry *journal.JournalEntry) (string, error) {
+func formatterMessage(entry *sdjournal.JournalEntry) (string, error) {
// Finally, append the message
msg, ok := entry.Fields["MESSAGE"]
if !ok {
@@ -211,18 +274,3 @@ func formatterMessage(entry *journal.JournalEntry) (string, error) {
msg = strings.TrimSuffix(msg, "\n")
return msg, nil
}
-
-type journaldFollowBuffer struct {
- logChannel chan *logs.LogLine
- withID bool
-}
-
-func (f journaldFollowBuffer) Write(p []byte) (int, error) {
- bytestr := string(p)
- logLine, err := logs.NewJournaldLogLine(bytestr, f.withID)
- if err != nil {
- return -1, err
- }
- f.logChannel <- logLine
- return len(p), nil
-}
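
For illustration only: the --tail handling above buffers log lines until the end of the journal is reached and only then flushes the trailing entries. A standalone sketch of that flush step, with a plain string slice standing in for the buffered log lines:

package main

import "fmt"

// flushTail mirrors the startIndex computation used above: once the end of
// the journal is hit, only the trailing part of the buffered queue is emitted.
func flushTail(queue []string, tail int64) []string {
	startIndex := int64(len(queue)-1) - tail
	if startIndex < 0 {
		startIndex = 0
	}
	return queue[startIndex:]
}

func main() {
	buffered := []string{"line1", "line2", "line3", "line4", "line5"}
	fmt.Println(flushTail(buffered, 2)) // [line3 line4 line5]
}
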
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index acfb96302..4d27e8fc4 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -97,18 +97,41 @@ func parseFilter(filter string) (string, string, error) {
return filterSplit[0], filterSplit[1], nil
}
-func generateEventOptions(filters []string, since, until string) ([]EventFilter, error) {
- options := make([]EventFilter, 0, len(filters))
+// applyFilters applies the EventFilter slices in sequence. Filters under the
+// same key are disjunctive while each key must match (conjunctive).
+func applyFilters(event *Event, filterMap map[string][]EventFilter) bool {
+ for _, filters := range filterMap {
+ success := false
+ for _, filter := range filters {
+ if filter(event) {
+ success = true
+ break
+ }
+ }
+ if !success {
+ return false
+ }
+ }
+ return true
+}
+
+// generateEventFilter parses the specified filters into a filter map that can
+// later on be used to filter events. Keys are conjunctive, values are
+// disjunctive.
+func generateEventFilters(filters []string, since, until string) (map[string][]EventFilter, error) {
+ filterMap := make(map[string][]EventFilter)
for _, filter := range filters {
key, val, err := parseFilter(filter)
if err != nil {
return nil, err
}
- funcFilter, err := generateEventFilter(key, val)
+ filterFunc, err := generateEventFilter(key, val)
if err != nil {
return nil, err
}
- options = append(options, funcFilter)
+ filterSlice := filterMap[key]
+ filterSlice = append(filterSlice, filterFunc)
+ filterMap[key] = filterSlice
}
if len(since) > 0 {
@@ -116,7 +139,8 @@ func generateEventOptions(filters []string, since, until string) ([]EventFilter,
if err != nil {
return nil, errors.Wrapf(err, "unable to convert since time of %s", since)
}
- options = append(options, generateEventSinceOption(timeSince))
+ filterFunc := generateEventSinceOption(timeSince)
+ filterMap["since"] = []EventFilter{filterFunc}
}
if len(until) > 0 {
@@ -124,7 +148,8 @@ func generateEventOptions(filters []string, since, until string) ([]EventFilter,
if err != nil {
return nil, errors.Wrapf(err, "unable to convert until time of %s", until)
}
- options = append(options, generateEventUntilOption(timeUntil))
+ filterFunc := generateEventUntilOption(timeUntil)
+ filterMap["until"] = []EventFilter{filterFunc}
}
- return options, nil
+ return filterMap, nil
}
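
A standalone sketch (not part of the commit) of the semantics documented above: filter values under a single key are OR-ed, while the keys themselves are AND-ed.

package main

import "fmt"

type event struct{ Type, Status string }

type eventFilter func(*event) bool

// applyFilters mimics the libpod logic: at least one filter per key must
// match (disjunctive within a key), and every key must match (conjunctive).
func applyFilters(e *event, filterMap map[string][]eventFilter) bool {
	for _, filters := range filterMap {
		matched := false
		for _, f := range filters {
			if f(e) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	filterMap := map[string][]eventFilter{
		"type": {func(e *event) bool { return e.Type == "container" }},
		"status": {
			func(e *event) bool { return e.Status == "start" },
			func(e *event) bool { return e.Status == "died" },
		},
	}
	fmt.Println(applyFilters(&event{"container", "died"}, filterMap)) // true
	fmt.Println(applyFilters(&event{"image", "pull"}, filterMap))     // false
}
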
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 23e5f15b1..7006290e9 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -69,9 +69,9 @@ func (e EventJournalD) Write(ee Event) error {
// Read reads events from the journal and sends qualified events to the event channel
func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
- eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
+ filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to generate event options")
+ return errors.Wrapf(err, "failed to parse event filters")
}
var untilTime time.Time
if len(options.Until) > 0 {
@@ -159,11 +159,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
continue
}
- include := true
- for _, filter := range eventOptions {
- include = include && filter(newEvent)
- }
- if include {
+ if applyFilters(newEvent, filterMap) {
options.EventChannel <- newEvent
}
}
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 0f00525e8..952444f2b 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -44,9 +44,9 @@ func (e EventLogFile) Write(ee Event) error {
// Reads from the log file
func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
- eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
+ filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "unable to generate event options")
+ return errors.Wrapf(err, "failed to parse event filters")
}
t, err := e.getTail(options)
if err != nil {
@@ -92,11 +92,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
default:
return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
- include := true
- for _, filter := range eventOptions {
- include = include && filter(event)
- }
- if include && copy {
+ if copy && applyFilters(event, filterMap) {
options.EventChannel <- event
}
}
diff --git a/libpod/network/config.go b/libpod/network/config.go
index ac4478602..9a3bc4763 100644
--- a/libpod/network/config.go
+++ b/libpod/network/config.go
@@ -44,17 +44,17 @@ type CNIPlugins interface {
// HostLocalBridge describes a configuration for a bridge plugin
// https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge#network-configuration-reference
type HostLocalBridge struct {
- PluginType string `json:"type"`
- BrName string `json:"bridge,omitempty"`
- IsGW bool `json:"isGateway"`
- IsDefaultGW bool `json:"isDefaultGateway,omitempty"`
- ForceAddress bool `json:"forceAddress,omitempty"`
- IPMasq bool `json:"ipMasq,omitempty"`
- MTU int `json:"mtu,omitempty"`
- HairpinMode bool `json:"hairpinMode,omitempty"`
- PromiscMode bool `json:"promiscMode,omitempty"`
- Vlan int `json:"vlan,omitempty"`
- IPAM IPAMHostLocalConf `json:"ipam"`
+ PluginType string `json:"type"`
+ BrName string `json:"bridge,omitempty"`
+ IsGW bool `json:"isGateway"`
+ IsDefaultGW bool `json:"isDefaultGateway,omitempty"`
+ ForceAddress bool `json:"forceAddress,omitempty"`
+ IPMasq bool `json:"ipMasq,omitempty"`
+ MTU int `json:"mtu,omitempty"`
+ HairpinMode bool `json:"hairpinMode,omitempty"`
+ PromiscMode bool `json:"promiscMode,omitempty"`
+ Vlan int `json:"vlan,omitempty"`
+ IPAM IPAMConfig `json:"ipam"`
}
// Bytes outputs []byte
@@ -62,9 +62,9 @@ func (h *HostLocalBridge) Bytes() ([]byte, error) {
return json.MarshalIndent(h, "", "\t")
}
-// IPAMHostLocalConf describes an IPAM configuration
+// IPAMConfig describes an IPAM configuration
// https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local#network-configuration-reference
-type IPAMHostLocalConf struct {
+type IPAMConfig struct {
PluginType string `json:"type"`
Routes []IPAMRoute `json:"routes,omitempty"`
ResolveConf string `json:"resolveConf,omitempty"`
@@ -81,7 +81,7 @@ type IPAMLocalHostRangeConf struct {
}
// Bytes outputs the configuration as []byte
-func (i IPAMHostLocalConf) Bytes() ([]byte, error) {
+func (i IPAMConfig) Bytes() ([]byte, error) {
return json.MarshalIndent(i, "", "\t")
}
@@ -101,19 +101,12 @@ func (p PortMapConfig) Bytes() ([]byte, error) {
return json.MarshalIndent(p, "", "\t")
}
-// IPAMDHCP describes the ipamdhcp config
-type IPAMDHCP struct {
- DHCP string `json:"type"`
- Routes []IPAMRoute `json:"routes,omitempty"`
- Ranges [][]IPAMLocalHostRangeConf `json:"ranges,omitempty"`
-}
-
// MacVLANConfig describes the macvlan config
type MacVLANConfig struct {
- PluginType string `json:"type"`
- Master string `json:"master"`
- IPAM IPAMDHCP `json:"ipam"`
- MTU int `json:"mtu,omitempty"`
+ PluginType string `json:"type"`
+ Master string `json:"master"`
+ IPAM IPAMConfig `json:"ipam"`
+ MTU int `json:"mtu,omitempty"`
}
// Bytes outputs the configuration as []byte
diff --git a/libpod/network/netconflist.go b/libpod/network/netconflist.go
index d2031df6d..d6c33740e 100644
--- a/libpod/network/netconflist.go
+++ b/libpod/network/netconflist.go
@@ -45,7 +45,7 @@ func NewNcList(name, version string, labels NcLabels) NcList {
}
// NewHostLocalBridge creates a new LocalBridge for host-local
-func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu int, vlan int, ipamConf IPAMHostLocalConf) *HostLocalBridge {
+func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu int, vlan int, ipamConf IPAMConfig) *HostLocalBridge {
hostLocalBridge := HostLocalBridge{
PluginType: "bridge",
BrName: name,
@@ -65,8 +65,8 @@ func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu in
}
// NewIPAMHostLocalConf creates a new IPAMHostLocal configuration
-func NewIPAMHostLocalConf(routes []IPAMRoute, ipamRanges [][]IPAMLocalHostRangeConf) (IPAMHostLocalConf, error) {
- ipamConf := IPAMHostLocalConf{
+func NewIPAMHostLocalConf(routes []IPAMRoute, ipamRanges [][]IPAMLocalHostRangeConf) (IPAMConfig, error) {
+ ipamConf := IPAMConfig{
PluginType: "host-local",
Routes: routes,
// Possible future support ? Leaving for clues
@@ -177,8 +177,10 @@ func HasDNSNamePlugin(paths []string) bool {
// NewMacVLANPlugin creates a macvlanconfig with a given device name
func NewMacVLANPlugin(device string, gateway net.IP, ipRange *net.IPNet, subnet *net.IPNet, mtu int) (MacVLANConfig, error) {
- i := IPAMDHCP{DHCP: "dhcp"}
- if gateway != nil || ipRange != nil || subnet != nil {
+ i := IPAMConfig{PluginType: "dhcp"}
+ if gateway != nil ||
+ (ipRange != nil && ipRange.IP != nil && ipRange.Mask != nil) ||
+ (subnet != nil && subnet.IP != nil && subnet.Mask != nil) {
ipam, err := NewIPAMLocalHostRange(subnet, ipRange, gateway)
if err != nil {
return MacVLANConfig{}, err
@@ -186,6 +188,12 @@ func NewMacVLANPlugin(device string, gateway net.IP, ipRange *net.IPNet, subnet
ranges := make([][]IPAMLocalHostRangeConf, 0)
ranges = append(ranges, ipam)
i.Ranges = ranges
+ route, err := NewIPAMDefaultRoute(IsIPv6(subnet.IP))
+ if err != nil {
+ return MacVLANConfig{}, err
+ }
+ i.Routes = []IPAMRoute{route}
+ i.PluginType = "host-local"
}
m := MacVLANConfig{
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0e8a4f768..c928e02a6 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -273,7 +273,6 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
if err != nil {
return nil, errors.Wrap(err, "error creating rootless cni network namespace")
}
-
// setup slirp4netns here
path := r.config.Engine.NetworkCmdPath
if path == "" {
@@ -437,9 +436,32 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
return rootlessCNINS, nil
}
+// setPrimaryMachineIP is used for podman-machine and it sets
+// an environment variable with the IP address of the podman-machine
+// host.
+func setPrimaryMachineIP() error {
+ // no connection is actually made here
+ conn, err := net.Dial("udp", "8.8.8.8:80")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Error(err)
+ }
+ }()
+ addr := conn.LocalAddr().(*net.UDPAddr)
+ return os.Setenv("PODMAN_MACHINE_HOST", addr.IP.String())
+}
+
// setUpOCICNIPod will set up the cni networks, on error it will also tear down the cni
// networks. If rootless it will join/create the rootless cni namespace.
func (r *Runtime) setUpOCICNIPod(podNetwork ocicni.PodNetwork) ([]ocicni.NetResult, error) {
+ if r.config.MachineEnabled() {
+ if err := setPrimaryMachineIP(); err != nil {
+ return nil, err
+ }
+ }
rootlessCNINS, err := r.GetRootlessCNINetNs(true)
if err != nil {
return nil, err
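
The UDP-dial trick in setPrimaryMachineIP is worth a standalone illustration (my own sketch, not Podman code): dialing UDP never sends a packet, it merely asks the kernel to pick a route and a local address.

package main

import (
	"fmt"
	"net"
)

// outboundIP returns the local address the kernel would use to reach dest.
// No traffic is generated: for UDP, Dial only selects a route and binds
// a local address.
func outboundIP(dest string) (net.IP, error) {
	conn, err := net.Dial("udp", dest)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	return conn.LocalAddr().(*net.UDPAddr).IP, nil
}

func main() {
	ip, err := outboundIP("8.8.8.8:80")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(ip)
}
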
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 713026a9e..d775b55e1 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/defaultnet"
"github.com/containers/common/pkg/secrets"
"github.com/containers/image/v5/pkg/sysregistriesv2"
is "github.com/containers/image/v5/storage"
@@ -217,8 +218,6 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R
return nil, err
}
- runtime.libimageEventsShutdown = make(chan bool)
-
return runtime, nil
}
@@ -460,6 +459,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
}
}
+ // If we need to make a default network - do so now.
+ if err := defaultnet.Create(runtime.config.Network.DefaultNetwork, runtime.config.Network.DefaultSubnet, runtime.config.Network.NetworkConfigDir, runtime.config.Engine.StaticDir, runtime.config.Engine.MachineEnabled); err != nil {
+ logrus.Errorf("Failed to create default CNI network: %v", err)
+ }
+
// Set up the CNI net plugin
netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...)
if err != nil {
@@ -701,6 +705,8 @@ var libimageEventsMap = map[libimage.EventType]events.Status{
// events on the libimage.Runtime. The goroutine will be cleaned up implicitly
// when the main() exits.
func (r *Runtime) libimageEvents() {
+ r.libimageEventsShutdown = make(chan bool)
+
toLibpodEventStatus := func(e *libimage.Event) events.Status {
status, found := libimageEventsMap[e.Type]
if !found {
@@ -709,9 +715,8 @@ func (r *Runtime) libimageEvents() {
return status
}
+ eventChannel := r.libimageRuntime.EventChannel()
go func() {
- eventChannel := r.libimageRuntime.EventChannel()
-
for {
// Make sure to read and write all events before
// checking if we're about to shutdown.
@@ -780,7 +785,9 @@ func (r *Runtime) Shutdown(force bool) error {
// attempt to shut it down
if r.store != nil {
// Wait for the events to be written.
- r.libimageEventsShutdown <- true
+ if r.libimageEventsShutdown != nil {
+ r.libimageEventsShutdown <- true
+ }
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
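
The nil check added around the shutdown channel matters because libimageEvents() now creates the channel lazily; a tiny standalone sketch of why the guard is needed (a send on a nil channel blocks forever):

package main

import "fmt"

func main() {
	var shutdown chan bool // stays nil unless the events goroutine was started

	// Without this guard, the send below would block forever:
	// sends and receives on a nil channel never proceed.
	if shutdown != nil {
		shutdown <- true
	}
	fmt.Println("shutdown signal skipped; channel was never created")
}
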
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 4e4b2a8ab..6c69d1b72 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -581,6 +581,15 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if err := c.stop(c.StopTimeout()); err != nil && errors.Cause(err) != define.ErrConmonDead {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
+
+ // We unlocked as part of stop() above - there's a chance someone
+ // else got in and removed the container before we reacquired the
+ // lock.
+ // Do a quick ping of the database to check if the container
+ // still exists.
+ if ok, _ := r.state.HasContainer(c.ID()); !ok {
+ return nil
+ }
}
// Remove all active exec sessions
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index 162a98135..0b5cbd343 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -62,7 +62,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
}
// Take body structure and convert to cliopts
- cliOpts, args, err := common.ContainerCreateToContainerCLIOpts(body, rtc.Engine.CgroupManager)
+ cliOpts, args, err := common.ContainerCreateToContainerCLIOpts(body, rtc)
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "make cli opts()"))
return
@@ -71,13 +71,12 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
imgNameOrID := newImage.ID()
// if the img had multi names with the same sha256 ID, should use the InputName, not the ID
if len(newImage.Names()) > 1 {
- imageRef, err := utils.ParseDockerReference(resolvedName)
- if err != nil {
+ if err := utils.IsRegistryReference(resolvedName); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
// maybe the InputName has no tag, so use full name to display
- imgNameOrID = imageRef.DockerReference().String()
+ imgNameOrID = resolvedName
}
sg := specgen.NewSpecGenerator(imgNameOrID, cliOpts.RootFS)
diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go
index 405e616c5..9fbac91e0 100644
--- a/pkg/api/handlers/compat/events.go
+++ b/pkg/api/handlers/compat/events.go
@@ -75,7 +75,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
coder := json.NewEncoder(w)
coder.SetEscapeHTML(true)
- for stream := true; stream; stream = query.Stream {
+ for {
select {
case err := <-errorChannel:
if err != nil {
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 6c8af66c3..ac212474b 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -1,7 +1,6 @@
package compat
import (
- "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -13,12 +12,12 @@ import (
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/api/handlers"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/auth"
- "github.com/containers/podman/v3/pkg/channel"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/domain/infra/abi"
"github.com/containers/storage"
@@ -213,6 +212,11 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
})
}
+type pullResult struct {
+ images []*libimage.Image
+ err error
+}
+
func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
// 200 no error
// 404 repo does not exist or no read access
@@ -234,6 +238,14 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
fromImage := mergeNameAndTagOrDigest(query.FromImage, query.Tag)
+ // Without this early check, this function would return 200 but report the error via the body stream soon after;
+ // it's better to let the caller know early via the HTTP status code that the request cannot be processed.
+ _, err := shortnames.Resolve(runtime.SystemContext(), fromImage)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrap(err, "failed to resolve image name"))
+ return
+ }
+
authConf, authfile, key, err := auth.GetCredentials(r)
if err != nil {
utils.Error(w, "failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String()))
@@ -274,26 +286,14 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
}
pullOptions.Writer = os.Stderr // allows for debugging on the server
- stderr := channel.NewWriter(make(chan []byte))
- defer stderr.Close()
-
progress := make(chan types.ProgressProperties)
+
pullOptions.Progress = progress
- var img string
- runCtx, cancel := context.WithCancel(context.Background())
+ pullResChan := make(chan pullResult)
go func() {
- defer cancel()
- pulledImages, err := runtime.LibimageRuntime().Pull(runCtx, fromImage, config.PullPolicyAlways, pullOptions)
- if err != nil {
- stderr.Write([]byte(err.Error() + "\n"))
- } else {
- if len(pulledImages) == 0 {
- utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.New("internal error: no images pulled"))
- return
- }
- img = pulledImages[0].ID()
- }
+ pulledImages, err := runtime.LibimageRuntime().Pull(r.Context(), fromImage, config.PullPolicyAlways, pullOptions)
+ pullResChan <- pullResult{images: pulledImages, err: err}
}()
flush := func() {
@@ -308,7 +308,6 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
enc := json.NewEncoder(w)
enc.SetEscapeHTML(true)
- var failed bool
loop: // break out of for/select infinite loop
for {
@@ -338,32 +337,31 @@ loop: // break out of for/select infinite loop
}
report.Id = e.Artifact.Digest.Encoded()[0:12]
if err := enc.Encode(report); err != nil {
- stderr.Write([]byte(err.Error()))
- }
- flush()
- case e := <-stderr.Chan():
- failed = true
- report.Error = string(e)
- if err := enc.Encode(report); err != nil {
logrus.Warnf("Failed to json encode error %q", err.Error())
}
flush()
- case <-runCtx.Done():
- if !failed {
- if utils.IsLibpodRequest(r) {
- report.Status = "Pull complete"
+ case pullRes := <-pullResChan:
+ err := pullRes.err
+ pulledImages := pullRes.images
+ if err != nil {
+ report.Error = err.Error()
+ } else {
+ if len(pulledImages) > 0 {
+ img := pulledImages[0].ID()
+ if utils.IsLibpodRequest(r) {
+ report.Status = "Pull complete"
+ } else {
+ report.Status = "Download complete"
+ }
+ report.Id = img[0:12]
} else {
- report.Status = "Download complete"
- }
- report.Id = img[0:12]
- if err := enc.Encode(report); err != nil {
- logrus.Warnf("Failed to json encode error %q", err.Error())
+ report.Error = "internal error: no images pulled"
}
- flush()
}
- break loop // break out of for/select infinite loop
- case <-r.Context().Done():
- // Client has closed connection
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
break loop // break out of for/select infinite loop
}
}
diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go
index db02af445..62f8cdc77 100644
--- a/pkg/api/handlers/compat/images_push.go
+++ b/pkg/api/handlers/compat/images_push.go
@@ -1,7 +1,6 @@
package compat
import (
- "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -12,7 +11,6 @@ import (
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
"github.com/containers/podman/v3/pkg/auth"
- "github.com/containers/podman/v3/pkg/channel"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/domain/infra/abi"
"github.com/containers/storage"
@@ -101,46 +99,33 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
destination = imageName
}
- errorWriter := channel.NewWriter(make(chan []byte))
- defer errorWriter.Close()
-
- statusWriter := channel.NewWriter(make(chan []byte))
- defer statusWriter.Close()
-
- runCtx, cancel := context.WithCancel(context.Background())
- var failed bool
-
- go func() {
- defer cancel()
-
- statusWriter.Write([]byte(fmt.Sprintf("The push refers to repository [%s]", imageName)))
-
- err := imageEngine.Push(runCtx, imageName, destination, options)
- if err != nil {
- if errors.Cause(err) != storage.ErrImageUnknown {
- errorWriter.Write([]byte("An image does not exist locally with the tag: " + imageName))
- } else {
- errorWriter.Write([]byte(err.Error()))
- }
- }
- }()
-
- flush := func() {
- if flusher, ok := w.(http.Flusher); ok {
- flusher.Flush()
- }
+ flush := func() {}
+ if flusher, ok := w.(http.Flusher); ok {
+ flush = flusher.Flush
}
w.WriteHeader(http.StatusOK)
w.Header().Add("Content-Type", "application/json")
flush()
+ var report jsonmessage.JSONMessage
enc := json.NewEncoder(w)
enc.SetEscapeHTML(true)
+ report.Status = fmt.Sprintf("The push refers to repository [%s]", imageName)
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+
+ pushErrChan := make(chan error)
+ go func() {
+ pushErrChan <- imageEngine.Push(r.Context(), imageName, destination, options)
+ }()
+
loop: // break out of for/select infinite loop
for {
- var report jsonmessage.JSONMessage
+ report = jsonmessage.JSONMessage{}
select {
case e := <-options.Progress:
@@ -160,43 +145,50 @@ loop: // break out of for/select infinite loop
}
report.ID = e.Artifact.Digest.Encoded()[0:12]
if err := enc.Encode(report); err != nil {
- errorWriter.Write([]byte(err.Error()))
+ logrus.Warnf("Failed to json encode error %q", err.Error())
}
flush()
- case e := <-statusWriter.Chan():
- report.Status = string(e)
- if err := enc.Encode(report); err != nil {
- errorWriter.Write([]byte(err.Error()))
+ case err := <-pushErrChan:
+ if err != nil {
+ var msg string
+ if errors.Cause(err) != storage.ErrImageUnknown {
+ msg = "An image does not exist locally with the tag: " + imageName
+ } else {
+ msg = err.Error()
+ }
+ report.Error = &jsonmessage.JSONError{
+ Message: msg,
+ }
+ report.ErrorMessage = msg
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ break loop
}
- flush()
- case e := <-errorWriter.Chan():
- failed = true
- report.Error = &jsonmessage.JSONError{
- Message: string(e),
+
+ digestBytes, err := ioutil.ReadAll(digestFile)
+ if err != nil {
+ report.Error = &jsonmessage.JSONError{
+ Message: err.Error(),
+ }
+ report.ErrorMessage = err.Error()
+ if err := enc.Encode(report); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ break loop
}
- report.ErrorMessage = string(e)
+ tag := query.Tag
+ if tag == "" {
+ tag = "latest"
+ }
+ report.Status = fmt.Sprintf("%s: digest: %s", tag, string(digestBytes))
if err := enc.Encode(report); err != nil {
logrus.Warnf("Failed to json encode error %q", err.Error())
}
+
flush()
- case <-runCtx.Done():
- if !failed {
- digestBytes, err := ioutil.ReadAll(digestFile)
- if err == nil {
- tag := query.Tag
- if tag == "" {
- tag = "latest"
- }
- report.Status = fmt.Sprintf("%s: digest: %s", tag, string(digestBytes))
- if err := enc.Encode(report); err != nil {
- logrus.Warnf("Failed to json encode error %q", err.Error())
- }
- flush()
- }
- }
- break loop // break out of for/select infinite loop
- case <-r.Context().Done():
- // Client has closed connection
break loop // break out of for/select infinite loop
}
}
diff --git a/pkg/api/handlers/compat/resize.go b/pkg/api/handlers/compat/resize.go
index 23ed33a22..f65e313fc 100644
--- a/pkg/api/handlers/compat/resize.go
+++ b/pkg/api/handlers/compat/resize.go
@@ -46,20 +46,13 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
utils.ContainerNotFound(w, name, err)
return
}
- if state, err := ctnr.State(); err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain container state"))
- return
- } else if state != define.ContainerStateRunning && !query.IgnoreNotRunning {
- utils.Error(w, "Container not running", http.StatusConflict,
- fmt.Errorf("container %q in wrong state %q", name, state.String()))
- return
- }
- // If container is not running, ignore since this can be a race condition, and is expected
if err := ctnr.AttachResize(sz); err != nil {
- if errors.Cause(err) != define.ErrCtrStateInvalid || !query.IgnoreNotRunning {
+ if errors.Cause(err) != define.ErrCtrStateInvalid {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
- return
+ } else {
+ utils.Error(w, "Container not running", http.StatusConflict, err)
}
+ return
}
// This is not a 204, even though we write nothing, for compatibility
// reasons.
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index a90408bfd..fc6ab4b4c 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -482,7 +482,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
destination = source
}
- if _, err := utils.ParseDockerReference(destination); err != nil {
+ if err := utils.IsRegistryReference(destination); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go
index 7545ba235..e88b53a4b 100644
--- a/pkg/api/handlers/libpod/images_pull.go
+++ b/pkg/api/handlers/libpod/images_pull.go
@@ -48,7 +48,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
}
// Make sure that the reference has no transport or the docker one.
- if _, err := utils.ParseDockerReference(query.Reference); err != nil {
+ if err := utils.IsRegistryReference(query.Reference); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
@@ -85,7 +85,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
var pulledImages []*libimage.Image
var pullError error
- runCtx, cancel := context.WithCancel(context.Background())
+ runCtx, cancel := context.WithCancel(r.Context())
go func() {
defer cancel()
pulledImages, pullError = runtime.LibimageRuntime().Pull(runCtx, query.Reference, config.PullPolicyAlways, pullOptions)
diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go
index f21eb2e80..2f36db583 100644
--- a/pkg/api/handlers/libpod/manifests.go
+++ b/pkg/api/handlers/libpod/manifests.go
@@ -169,7 +169,7 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) {
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
return
}
- if _, err := utils.ParseDockerReference(query.Destination); err != nil {
+ if err := utils.IsRegistryReference(query.Destination); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
diff --git a/pkg/api/handlers/libpod/swagger.go b/pkg/api/handlers/libpod/swagger.go
index 9450a70d9..19eced986 100644
--- a/pkg/api/handlers/libpod/swagger.go
+++ b/pkg/api/handlers/libpod/swagger.go
@@ -95,7 +95,7 @@ type swagInfoResponse struct {
// swagger:response NetworkRmReport
type swagNetworkRmReport struct {
// in:body
- Body entities.NetworkRmReport
+ Body []entities.NetworkRmReport
}
// Network inspect
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 2662cd368..2a1908d63 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -15,22 +15,19 @@ import (
"github.com/pkg/errors"
)
-// ParseDockerReference parses the specified image name to a
-// `types.ImageReference` and enforces it to refer to a docker-transport
-// reference.
-func ParseDockerReference(name string) (types.ImageReference, error) {
- dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name())
+// IsRegistryReference checks if the specified name points to the "docker://"
+// transport. If it points to no supported transport, we'll assume a
+// non-transport reference pointing to an image (e.g., "fedora:latest").
+func IsRegistryReference(name string) error {
imageRef, err := alltransports.ParseImageName(name)
- if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
- return nil, errors.Errorf("reference %q must be a docker reference", name)
- } else if err != nil {
- origErr := err
- imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, name))
- if err != nil {
- return nil, errors.Wrapf(origErr, "reference %q must be a docker reference", name)
- }
+ if err != nil {
+ // No supported transport -> assume a docker-style reference.
+ return nil
}
- return imageRef, nil
+ if imageRef.Transport().Name() == docker.Transport.Name() {
+ return nil
+ }
+ return errors.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
}
// ParseStorageReference parses the specified image name to a
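
For illustration (my own examples, not from the commit), the new helper accepts plain image names and docker:// references and rejects every other transport:

package main

import (
	"fmt"

	"github.com/containers/podman/v3/pkg/api/handlers/utils"
)

func main() {
	// No transport prefix: treated as a registry image name, accepted.
	fmt.Println(utils.IsRegistryReference("fedora:latest")) // <nil>
	// Explicit docker transport: accepted.
	fmt.Println(utils.IsRegistryReference("docker://quay.io/libpod/alpine:latest")) // <nil>
	// Any other transport is rejected with an error.
	fmt.Println(utils.IsRegistryReference("oci-archive:/tmp/img.tar")) // unsupported transport ...
}
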
diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go
index 28b8706a8..becc674c0 100644
--- a/pkg/api/server/handler_api.go
+++ b/pkg/api/server/handler_api.go
@@ -63,6 +63,12 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc {
w.Header().Set("Libpod-API-Version", lv)
w.Header().Set("Server", "Libpod/"+lv+" ("+runtime.GOOS+")")
+ if s.CorsHeaders != "" {
+ w.Header().Set("Access-Control-Allow-Origin", s.CorsHeaders)
+ w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth, Connection, Upgrade, X-Registry-Config")
+ w.Header().Set("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
+ }
+
h(w, r)
logrus.Debugf("APIHandler(%s) -- %s %s END", rid, r.Method, r.URL.String())
}
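
The same header injection can be expressed as an ordinary net/http middleware; a minimal sketch (mine, not Podman's handler) with the same kind of response headers:

package main

import (
	"fmt"
	"net/http"
)

// withCORS wraps a handler and injects CORS headers whenever an allowed
// origin has been configured, mirroring the APIHandler change above.
func withCORS(allowedOrigin string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if allowedOrigin != "" {
			w.Header().Set("Access-Control-Allow-Origin", allowedOrigin)
			w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
			w.Header().Set("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/_ping", withCORS("*", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "OK")
	}))
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
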
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index aa999905e..88ebb4df5 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -1364,6 +1364,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ok"
// 404:
// $ref: "#/responses/NoSuchContainer"
+ // 409:
+ // $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go
index 9a5ccb789..4d9806316 100644
--- a/pkg/api/server/register_networks.go
+++ b/pkg/api/server/register_networks.go
@@ -241,7 +241,9 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// tags:
// - networks
// summary: List networks
- // description: Display summary of network configurations
+ // description: |
+ // Display summary of network configurations.
+ // - In a 200 response, all of the fields named Bytes are returned as a Base64 encoded string.
// parameters:
// - in: query
// name: filters
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 972541bc6..1e8faf8f5 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -34,10 +34,12 @@ type APIServer struct {
context.CancelFunc // Stop APIServer
idleTracker *idle.Tracker // Track connections to support idle shutdown
pprof *http.Server // Sidecar http server for providing performance data
+ CorsHeaders string // Inject CORS headers to each request
}
// Number of seconds to wait for next request, if exceeded shutdown server
const (
+ DefaultCorsHeaders = ""
DefaultServiceDuration = 300 * time.Second
UnlimitedServiceDuration = 0 * time.Second
)
@@ -45,17 +47,22 @@ const (
// shutdownOnce ensures Shutdown() may safely be called from several go routines
var shutdownOnce sync.Once
+type Options struct {
+ Timeout time.Duration
+ CorsHeaders string
+}
+
// NewServer will create and configure a new API server with all defaults
func NewServer(runtime *libpod.Runtime) (*APIServer, error) {
- return newServer(runtime, DefaultServiceDuration, nil)
+ return newServer(runtime, DefaultServiceDuration, nil, DefaultCorsHeaders)
}
// NewServerWithSettings will create and configure a new API server using provided settings
-func NewServerWithSettings(runtime *libpod.Runtime, duration time.Duration, listener *net.Listener) (*APIServer, error) {
- return newServer(runtime, duration, listener)
+func NewServerWithSettings(runtime *libpod.Runtime, listener *net.Listener, opts Options) (*APIServer, error) {
+ return newServer(runtime, opts.Timeout, listener, opts.CorsHeaders)
}
-func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Listener) (*APIServer, error) {
+func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Listener, corsHeaders string) (*APIServer, error) {
// If listener not provided try socket activation protocol
if listener == nil {
if _, found := os.LookupEnv("LISTEN_PID"); !found {
@@ -71,6 +78,11 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
}
listener = &listeners[0]
}
+ if corsHeaders == "" {
+ logrus.Debug("CORS Headers were not set")
+ } else {
+ logrus.Debugf("CORS Headers were set to %s", corsHeaders)
+ }
logrus.Infof("API server listening on %q", (*listener).Addr())
router := mux.NewRouter().UseEncodedPath()
@@ -88,6 +100,7 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
idleTracker: idle,
Listener: *listener,
Runtime: runtime,
+ CorsHeaders: corsHeaders,
}
router.NotFoundHandler = http.HandlerFunc(
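
A hypothetical caller of the reworked constructor could wire the options like this (sketch only; the runtime and listener are assumed to be created elsewhere, and "*" is a made-up CorsHeaders value allowing any origin):

package example

import (
	"net"
	"time"

	"github.com/containers/podman/v3/libpod"
	"github.com/containers/podman/v3/pkg/api/server"
)

// newAPIServer shows the new Options-based call introduced above.
func newAPIServer(rt *libpod.Runtime, ln *net.Listener) (*server.APIServer, error) {
	opts := server.Options{
		Timeout:     300 * time.Second, // matches DefaultServiceDuration
		CorsHeaders: "*",
	}
	return server.NewServerWithSettings(rt, ln, opts)
}
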
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index fd8a7011d..adef1e7c8 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -138,7 +138,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
winCtx, winCancel := context.WithCancel(ctx)
defer winCancel()
- go attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file)
+ attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file)
}
// If we are attaching around a start, we need to "signal"
@@ -327,32 +327,38 @@ func (f *rawFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return append(buffer, '\r'), nil
}
-// This is intended to be run as a goroutine, handling resizing for a container
-// or exec session.
+// This is intended not to be run as a goroutine. It handles resizing for a container
+// or exec session: it calls resize once and then starts a goroutine which calls resize on every winChange signal.
func attachHandleResize(ctx, winCtx context.Context, winChange chan os.Signal, isExec bool, id string, file *os.File) {
- // Prime the pump, we need one reset to ensure everything is ready
- winChange <- sig.SIGWINCH
- for {
- select {
- case <-winCtx.Done():
- return
- case <-winChange:
- w, h, err := terminal.GetSize(int(file.Fd()))
- if err != nil {
- logrus.Warnf("failed to obtain TTY size: %v", err)
- }
+ resize := func() {
+ w, h, err := terminal.GetSize(int(file.Fd()))
+ if err != nil {
+ logrus.Warnf("failed to obtain TTY size: %v", err)
+ }
- var resizeErr error
- if isExec {
- resizeErr = ResizeExecTTY(ctx, id, new(ResizeExecTTYOptions).WithHeight(h).WithWidth(w))
- } else {
- resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w))
- }
- if resizeErr != nil {
- logrus.Warnf("failed to resize TTY: %v", resizeErr)
- }
+ var resizeErr error
+ if isExec {
+ resizeErr = ResizeExecTTY(ctx, id, new(ResizeExecTTYOptions).WithHeight(h).WithWidth(w))
+ } else {
+ resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w))
+ }
+ if resizeErr != nil {
+ logrus.Warnf("failed to resize TTY: %v", resizeErr)
}
}
+
+ resize()
+
+ go func() {
+ for {
+ select {
+ case <-winCtx.Done():
+ return
+ case <-winChange:
+ resize()
+ }
+ }
+ }()
}
// Configure the given terminal for raw mode
@@ -457,7 +463,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar
winCtx, winCancel := context.WithCancel(ctx)
defer winCancel()
- go attachHandleResize(ctx, winCtx, winChange, true, sessionID, terminalFile)
+ attachHandleResize(ctx, winCtx, winChange, true, sessionID, terminalFile)
}
if options.GetAttachInput() {
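
The resize-once-then-watch pattern introduced above is a general terminal idiom; a standalone Linux-only sketch using golang.org/x/term and SIGWINCH (not Podman code):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"golang.org/x/term"
)

func main() {
	resize := func() {
		w, h, err := term.GetSize(int(os.Stdout.Fd()))
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to obtain TTY size: %v\n", err)
			return
		}
		fmt.Printf("terminal is %dx%d\n", w, h)
	}

	// Prime once so the size is known before any change arrives...
	resize()

	// ...then react to every window-change signal.
	winChange := make(chan os.Signal, 1)
	signal.Notify(winChange, syscall.SIGWINCH)
	for range winChange {
		resize()
	}
}
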
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index b56afbceb..346d55c47 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -450,7 +450,7 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = orig
hdr.Size = 0
-
+ hdr.Name = name
return tw.WriteHeader(hdr)
}
f, err := os.Open(path)
diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index 7a8f71c66..0d45cab5f 100644
--- a/pkg/checkpoint/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
+ "github.com/containers/podman/v3/pkg/specgen/generate"
"github.com/containers/storage/pkg/archive"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -95,6 +96,14 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
newName = true
}
+ if len(restoreOptions.PublishPorts) > 0 {
+ ports, _, _, err := generate.ParsePortMapping(restoreOptions.PublishPorts)
+ if err != nil {
+ return nil, err
+ }
+ ctrConfig.PortMappings = ports
+ }
+
pullOptions := &libimage.PullOptions{}
pullOptions.Writer = os.Stderr
if _, err := runtime.LibimageRuntime().Pull(ctx, ctrConfig.RootfsImageName, config.PullPolicyMissing, pullOptions); err != nil {
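
The new PublishPorts field lets `podman container restore -p` replace the checkpointed container's port mappings before the container is recreated. The following is a self-contained toy, not Podman's parser: portMapping only mirrors the assumed shape of specgen.PortMapping, and the real --publish flag additionally supports ranges, host IPs, and protocols.

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // portMapping mirrors the shape of specgen.PortMapping for illustration only.
    type portMapping struct {
        HostPort      uint16
        ContainerPort uint16
        Protocol      string
    }

    // parsePublish handles the simple HOST:CONTAINER form of --publish used by
    // the restore test added later in this changeset.
    func parsePublish(spec string) (portMapping, error) {
        parts := strings.SplitN(spec, ":", 2)
        if len(parts) != 2 {
            return portMapping{}, fmt.Errorf("expected HOST:CONTAINER, got %q", spec)
        }
        host, err := strconv.ParseUint(parts[0], 10, 16)
        if err != nil {
            return portMapping{}, err
        }
        ctr, err := strconv.ParseUint(parts[1], 10, 16)
        if err != nil {
            return portMapping{}, err
        }
        return portMapping{HostPort: uint16(host), ContainerPort: uint16(ctr), Protocol: "tcp"}, nil
    }

    func main() {
        pm, err := parsePublish("1235:6379")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", pm) // {HostPort:1235 ContainerPort:6379 Protocol:tcp}
    }
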
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index eacc14d50..8ed9b9b61 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/specgen"
+ "github.com/containers/storage/pkg/archive"
"github.com/cri-o/ocicni/pkg/ocicni"
)
@@ -178,6 +179,7 @@ type CheckpointOptions struct {
TCPEstablished bool
PreCheckPoint bool
WithPrevious bool
+ Compression archive.Compression
}
type CheckpointReport struct {
@@ -197,6 +199,7 @@ type RestoreOptions struct {
Name string
TCPEstablished bool
ImportPrevious string
+ PublishPorts []specgen.PortMapping
}
type RestoreReport struct {
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index 930ca53ae..5e7cc9ad1 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -30,29 +30,41 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
if err != nil {
return nil
}
+ image := e.Actor.Attributes["image"]
+ name := e.Actor.Attributes["name"]
+ details := e.Actor.Attributes
+ delete(details, "image")
+ delete(details, "name")
+ delete(details, "containerExitCode")
return &libpodEvents.Event{
ContainerExitCode: exitCode,
ID: e.Actor.ID,
- Image: e.Actor.Attributes["image"],
- Name: e.Actor.Attributes["name"],
+ Image: image,
+ Name: name,
Status: status,
Time: time.Unix(e.Time, e.TimeNano),
Type: t,
+ Details: libpodEvents.Details{
+ Attributes: details,
+ },
}
}
// ConvertToEntitiesEvent converts a libpod event to an entities one.
func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
+ attributes := e.Details.Attributes
+ if attributes == nil {
+ attributes = make(map[string]string)
+ }
+ attributes["image"] = e.Image
+ attributes["name"] = e.Name
+ attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
return &Event{dockerEvents.Message{
Type: e.Type.String(),
Action: e.Status.String(),
Actor: dockerEvents.Actor{
- ID: e.ID,
- Attributes: map[string]string{
- "image": e.Image,
- "name": e.Name,
- "containerExitCode": strconv.Itoa(e.ContainerExitCode),
- },
+ ID: e.ID,
+ Attributes: attributes,
},
Scope: "local",
Time: e.Time.Unix(),
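
One subtlety in ConvertToLibpodEvent above: `details := e.Actor.Attributes` aliases the incoming map, so the delete calls also strip the reserved keys out of the caller's Actor.Attributes. That is harmless for the current call sites, but the aliasing is easy to miss; a tiny illustration with plain maps, nothing Podman-specific:

    package main

    import "fmt"

    func main() {
        actor := map[string]string{"image": "alpine", "name": "c1", "podId": "p1"}
        details := actor // maps are reference types: details and actor are the same map
        delete(details, "image")
        delete(details, "name")
        fmt.Println(actor) // map[podId:p1] -- the caller's map lost the reserved keys too
    }
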
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index 31a6185dc..cca4bf44e 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -11,9 +11,10 @@ import (
// ServiceOptions provides the input for starting an API Service
type ServiceOptions struct {
- URI string // Path to unix domain socket service should listen on
- Timeout time.Duration // duration of inactivity the service should wait before shutting down
- Command *cobra.Command // CLI command provided. Used in V1 code
+ URI string // Path to unix domain socket service should listen on
+ Timeout time.Duration // duration of inactivity the service should wait before shutting down
+ Command *cobra.Command // CLI command provided. Used in V1 code
+ CorsHeaders string // CORS headers
}
// SystemPruneOptions provides options to prune system.
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 237a43441..4908e72f6 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -483,6 +483,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
KeepRunning: options.LeaveRunning,
PreCheckPoint: options.PreCheckPoint,
WithPrevious: options.WithPrevious,
+ Compression: options.Compression,
}
if options.All {
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 33ab280e5..7900caaa6 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -11,7 +11,7 @@ import (
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
- var reports []*entities.NetworkListReport
+ reports := make([]*entities.NetworkListReport, 0)
config, err := ic.Libpod.GetConfig()
if err != nil {
diff --git a/pkg/machine/config.go b/pkg/machine/config.go
index 652229963..58794ce42 100644
--- a/pkg/machine/config.go
+++ b/pkg/machine/config.go
@@ -32,6 +32,7 @@ var (
ErrVMAlreadyExists = errors.New("VM already exists")
ErrVMAlreadyRunning = errors.New("VM already running")
ErrMultipleActiveVM = errors.New("only one VM can be active at a time")
+ ForwarderBinaryName = "gvproxy"
)
type Download struct {
diff --git a/pkg/machine/ignition.go b/pkg/machine/ignition.go
index 00068a136..a5c7210af 100644
--- a/pkg/machine/ignition.go
+++ b/pkg/machine/ignition.go
@@ -118,6 +118,7 @@ func getDirs(usrName string) []Directory {
// in one swoop, then the leading dirs are created as root.
newDirs := []string{
"/home/" + usrName + "/.config",
+ "/home/" + usrName + "/.config/containers",
"/home/" + usrName + "/.config/systemd",
"/home/" + usrName + "/.config/systemd/user",
"/home/" + usrName + "/.config/systemd/user/default.target.wants",
@@ -159,6 +160,22 @@ func getFiles(usrName string) []File {
},
})
+ // Set up containers.conf for the core user so that CNI networks are used
+ // by default
+ files = append(files, File{
+ Node: Node{
+ Group: getNodeGrp(usrName),
+ Path: "/home/" + usrName + "/.config/containers/containers.conf",
+ User: getNodeUsr(usrName),
+ },
+ FileEmbedded1: FileEmbedded1{
+ Append: nil,
+ Contents: Resource{
+ Source: strToPtr("data:,%5Bcontainers%5D%0D%0Anetns%3D%22bridge%22%0D%0Arootless_networking%3D%22cni%22"),
+ },
+ Mode: intToPtr(484),
+ },
+ })
// Add a file into linger
files = append(files, File{
Node: Node{
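
For reference, the percent-encoded Ignition source above decodes to the following containers.conf dropped into the core user's home (with CRLF line endings), and Mode 484 is the decimal form of octal 0744:

    [containers]
    netns="bridge"
    rootless_networking="cni"
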
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 269a2a2da..31c355d4a 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -13,6 +13,8 @@ import (
"strings"
"time"
+ "github.com/containers/podman/v3/pkg/rootless"
+
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/utils"
"github.com/containers/storage/pkg/homedir"
@@ -82,9 +84,10 @@ func NewMachine(opts machine.InitOptions) (machine.VM, error) {
cmd = append(cmd, []string{"-qmp", monitor.Network + ":/" + monitor.Address + ",server=on,wait=off"}...)
// Add network
- cmd = append(cmd, "-nic", "user,model=virtio,hostfwd=tcp::"+strconv.Itoa(vm.Port)+"-:22")
-
- socketPath, err := getSocketDir()
+ // The MAC address is currently hardcoded so that the host networking assigns the VM a specific IP address.
+ // This is why only one VM can run at a time for now.
+ cmd = append(cmd, []string{"-netdev", "socket,id=vlan,fd=3", "-device", "virtio-net-pci,netdev=vlan,mac=5a:94:ef:e4:0c:ee"}...)
+ socketPath, err := getRuntimeDir()
if err != nil {
return nil, err
}
@@ -235,12 +238,35 @@ func (v *MachineVM) Init(opts machine.InitOptions) error {
// Start executes the qemu command line and forks it
func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
var (
- conn net.Conn
- err error
- wait time.Duration = time.Millisecond * 500
+ conn net.Conn
+ err error
+ qemuSocketConn net.Conn
+ wait time.Duration = time.Millisecond * 500
)
+ if err := v.startHostNetworking(); err != nil {
+ return errors.Errorf("unable to start host networking: %q", err)
+ }
+ qemuSocketPath, _, err := v.getSocketandPid()
+
+ for i := 0; i < 6; i++ {
+ qemuSocketConn, err = net.Dial("unix", qemuSocketPath)
+ if err == nil {
+ break
+ }
+ time.Sleep(wait)
+ wait++
+ }
+ if err != nil {
+ return err
+ }
+
+ fd, err := qemuSocketConn.(*net.UnixConn).File()
+ if err != nil {
+ return err
+ }
+
attr := new(os.ProcAttr)
- files := []*os.File{os.Stdin, os.Stdout, os.Stderr}
+ files := []*os.File{os.Stdin, os.Stdout, os.Stderr, fd}
attr.Files = files
logrus.Debug(v.CmdLine)
cmd := v.CmdLine
@@ -256,7 +282,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
return err
}
fmt.Println("Waiting for VM ...")
- socketPath, err := getSocketDir()
+ socketPath, err := getRuntimeDir()
if err != nil {
return err
}
@@ -309,16 +335,42 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
logrus.Error(err)
}
}()
- _, err = qmpMonitor.Run(input)
- return err
+ if _, err = qmpMonitor.Run(input); err != nil {
+ return err
+ }
+ _, pidFile, err := v.getSocketandPid()
+ if err != nil {
+ return err
+ }
+ if _, err := os.Stat(pidFile); os.IsNotExist(err) {
+ logrus.Infof("pid file %s does not exist", pidFile)
+ return nil
+ }
+ pidString, err := ioutil.ReadFile(pidFile)
+ if err != nil {
+ return err
+ }
+ pidNum, err := strconv.Atoi(string(pidString))
+ if err != nil {
+ return err
+ }
+
+ p, err := os.FindProcess(pidNum)
+ if p == nil && err != nil {
+ return err
+ }
+ return p.Kill()
}
// NewQMPMonitor creates the monitor subsection of our vm
func NewQMPMonitor(network, name string, timeout time.Duration) (Monitor, error) {
- rtDir, err := getSocketDir()
+ rtDir, err := getRuntimeDir()
if err != nil {
return Monitor{}, err
}
+ if !rootless.IsRootless() {
+ rtDir = "/run"
+ }
rtDir = filepath.Join(rtDir, "podman")
if _, err := os.Stat(filepath.Join(rtDir)); os.IsNotExist(err) {
// TODO 0644 is fine on linux but macos is weird
@@ -408,7 +460,7 @@ func (v *MachineVM) SSH(name string, opts machine.SSHOptions) error {
sshDestination := v.RemoteUsername + "@localhost"
port := strconv.Itoa(v.Port)
- args := []string{"-i", v.IdentityPath, "-p", port, sshDestination}
+ args := []string{"-i", v.IdentityPath, "-p", port, sshDestination, "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no"}
if len(opts.Args) > 0 {
args = append(args, opts.Args...)
} else {
@@ -533,3 +585,47 @@ func CheckActiveVM() (bool, string, error) {
}
return false, "", nil
}
+
+// startHostNetworking runs a binary on the host system that allows users
+// to set up port forwarding to the Podman virtual machine
+func (v *MachineVM) startHostNetworking() error {
+ binary, err := exec.LookPath(machine.ForwarderBinaryName)
+ if err != nil {
+ return err
+ }
+ // Listen on all interfaces at port 7777 for setting up and tearing
+ // down forwarding
+ listenSocket := "tcp://0.0.0.0:7777"
+ qemuSocket, pidFile, err := v.getSocketandPid()
+ if err != nil {
+ return err
+ }
+ attr := new(os.ProcAttr)
+ // Pass on stdin, stdout, stderr
+ files := []*os.File{os.Stdin, os.Stdout, os.Stderr}
+ attr.Files = files
+ cmd := []string{binary}
+ cmd = append(cmd, []string{"-listen", listenSocket, "-listen-qemu", fmt.Sprintf("unix://%s", qemuSocket), "-pid-file", pidFile}...)
+ // Add the ssh port
+ cmd = append(cmd, []string{"-ssh-port", fmt.Sprintf("%d", v.Port)}...)
+ if logrus.GetLevel() == logrus.DebugLevel {
+ cmd = append(cmd, "--debug")
+ fmt.Println(cmd)
+ }
+ _, err = os.StartProcess(cmd[0], cmd, attr)
+ return err
+}
+
+func (v *MachineVM) getSocketandPid() (string, string, error) {
+ rtPath, err := getRuntimeDir()
+ if err != nil {
+ return "", "", err
+ }
+ if !rootless.IsRootless() {
+ rtPath = "/run"
+ }
+ socketDir := filepath.Join(rtPath, "podman")
+ pidFile := filepath.Join(socketDir, fmt.Sprintf("%s.pid", v.Name))
+ qemuSocket := filepath.Join(socketDir, fmt.Sprintf("qemu_%s.sock", v.Name))
+ return qemuSocket, pidFile, nil
+}
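
The netdev change relies on handing the child QEMU process an already-connected unix socket as file descriptor 3 (hence "socket,id=vlan,fd=3"): the fourth entry in ProcAttr.Files becomes fd 3 in the child. A minimal sketch of that mechanism, with a placeholder socket path and child command (illustrative only; the real code execs the assembled QEMU command line):

    package main

    import (
        "log"
        "net"
        "os"
    )

    func main() {
        // Placeholder path; Podman derives this from the runtime dir via getSocketandPid.
        conn, err := net.Dial("unix", "/tmp/qemu_example.sock")
        if err != nil {
            log.Fatal(err)
        }
        f, err := conn.(*net.UnixConn).File() // dup the connection into an *os.File
        if err != nil {
            log.Fatal(err)
        }
        attr := &os.ProcAttr{
            // Indexes 0..2 are stdio; index 3 (f) therefore becomes fd 3 in the
            // child, which is what "-netdev socket,id=vlan,fd=3" refers to.
            Files: []*os.File{os.Stdin, os.Stdout, os.Stderr, f},
        }
        p, err := os.StartProcess("/bin/true", []string{"true"}, attr)
        if err != nil {
            log.Fatal(err)
        }
        _, _ = p.Wait()
    }
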
diff --git a/pkg/machine/qemu/options_darwin.go b/pkg/machine/qemu/options_darwin.go
index 46ccf24cb..440937131 100644
--- a/pkg/machine/qemu/options_darwin.go
+++ b/pkg/machine/qemu/options_darwin.go
@@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
)
-func getSocketDir() (string, error) {
+func getRuntimeDir() (string, error) {
tmpDir, ok := os.LookupEnv("TMPDIR")
if !ok {
return "", errors.New("unable to resolve TMPDIR")
diff --git a/pkg/machine/qemu/options_darwin_amd64.go b/pkg/machine/qemu/options_darwin_amd64.go
index 69f7982b2..ee1036291 100644
--- a/pkg/machine/qemu/options_darwin_amd64.go
+++ b/pkg/machine/qemu/options_darwin_amd64.go
@@ -5,7 +5,7 @@ var (
)
func (v *MachineVM) addArchOptions() []string {
- opts := []string{"-cpu", "host"}
+ opts := []string{"-machine", "q35,accel=hvf:tcg"}
return opts
}
diff --git a/pkg/machine/qemu/options_linux.go b/pkg/machine/qemu/options_linux.go
index 0a2e40d8f..c73a68cc6 100644
--- a/pkg/machine/qemu/options_linux.go
+++ b/pkg/machine/qemu/options_linux.go
@@ -1,7 +1,13 @@
package qemu
-import "github.com/containers/podman/v3/pkg/util"
+import (
+ "github.com/containers/podman/v3/pkg/rootless"
+ "github.com/containers/podman/v3/pkg/util"
+)
-func getSocketDir() (string, error) {
+func getRuntimeDir() (string, error) {
+ if !rootless.IsRootless() {
+ return "/run", nil
+ }
return util.GetRuntimeDir()
}
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 918b9a7e6..0d1d6e93e 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -311,10 +311,10 @@ static void __attribute__((constructor)) init()
do_socket_activation = true;
saved_systemd_listen_pid = strdup(listen_pid);
saved_systemd_listen_fds = strdup(listen_fds);
- saved_systemd_listen_fdnames = strdup(listen_fdnames);
+ if (listen_fdnames != NULL)
+ saved_systemd_listen_fdnames = strdup(listen_fdnames);
if (saved_systemd_listen_pid == NULL
- || saved_systemd_listen_fds == NULL
- || saved_systemd_listen_fdnames == NULL)
+ || saved_systemd_listen_fds == NULL)
{
fprintf (stderr, "save socket listen environments error: %s\n", strerror (errno));
_exit (EXIT_FAILURE);
@@ -700,7 +700,9 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path)
sprintf (s, "%d", getpid());
setenv ("LISTEN_PID", s, true);
setenv ("LISTEN_FDS", saved_systemd_listen_fds, true);
- setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true);
+ // Setting fdnames is optional for systemd socket activation
+ if (saved_systemd_listen_fdnames != NULL)
+ setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true);
}
setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1);
@@ -896,7 +898,9 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re
sprintf (s, "%d", getpid());
setenv ("LISTEN_PID", s, true);
setenv ("LISTEN_FDS", saved_systemd_listen_fds, true);
- setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true);
+ // Setting fdnames is optional for systemd socket activation
+ if (saved_systemd_listen_fdnames != NULL)
+ setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true);
}
setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1);
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 054388384..fb563f935 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/common/pkg/secrets"
ann "github.com/containers/podman/v3/pkg/annotations"
"github.com/containers/podman/v3/pkg/specgen"
+ "github.com/containers/podman/v3/pkg/specgen/generate"
"github.com/containers/podman/v3/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -182,6 +183,19 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
if imageData.Config.WorkingDir != "" {
s.WorkDir = imageData.Config.WorkingDir
}
+ if s.User == "" {
+ s.User = imageData.Config.User
+ }
+
+ exposed, err := generate.GenExposedPorts(imageData.Config.ExposedPorts)
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range s.Expose {
+ exposed[k] = v
+ }
+ s.Expose = exposed
// Pull entrypoint and cmd from image
s.Entrypoint = imageData.Config.Entrypoint
s.Command = imageData.Config.Cmd
diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go
index 278f35c22..f41186ae4 100644
--- a/pkg/specgen/generate/namespaces.go
+++ b/pkg/specgen/generate/namespaces.go
@@ -66,7 +66,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod)
case "cgroup":
return specgen.ParseCgroupNamespace(cfg.Containers.CgroupNS)
case "net":
- ns, _, err := specgen.ParseNetworkNamespace(cfg.Containers.NetNS)
+ ns, _, err := specgen.ParseNetworkNamespace(cfg.Containers.NetNS, cfg.Containers.RootlessNetworking == "cni")
return ns, err
}
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index 20151f016..07c56b799 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -125,7 +125,7 @@ func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod
options = append(options, libpod.WithPodUseImageHosts())
}
if len(p.PortMappings) > 0 {
- ports, _, _, err := parsePortMapping(p.PortMappings)
+ ports, _, _, err := ParsePortMapping(p.PortMappings)
if err != nil {
return nil, err
}
diff --git a/pkg/specgen/generate/ports.go b/pkg/specgen/generate/ports.go
index 6832664a7..c00ad19fb 100644
--- a/pkg/specgen/generate/ports.go
+++ b/pkg/specgen/generate/ports.go
@@ -24,7 +24,7 @@ const (
// Parse port maps to OCICNI port mappings.
// Returns a set of OCICNI port mappings, and maps of utilized container and
// host ports.
-func parsePortMapping(portMappings []specgen.PortMapping) ([]ocicni.PortMapping, map[string]map[string]map[uint16]uint16, map[string]map[string]map[uint16]uint16, error) {
+func ParsePortMapping(portMappings []specgen.PortMapping) ([]ocicni.PortMapping, map[string]map[string]map[uint16]uint16, map[string]map[string]map[uint16]uint16, error) {
// First, we need to validate the ports passed in the specgen, and then
// convert them into CNI port mappings.
type tempMapping struct {
@@ -254,7 +254,7 @@ func parsePortMapping(portMappings []specgen.PortMapping) ([]ocicni.PortMapping,
// Make final port mappings for the container
func createPortMappings(ctx context.Context, s *specgen.SpecGenerator, imageData *libimage.ImageData) ([]ocicni.PortMapping, error) {
- finalMappings, containerPortValidate, hostPortValidate, err := parsePortMapping(s.PortMappings)
+ finalMappings, containerPortValidate, hostPortValidate, err := ParsePortMapping(s.PortMappings)
if err != nil {
return nil, err
}
@@ -268,31 +268,18 @@ func createPortMappings(ctx context.Context, s *specgen.SpecGenerator, imageData
logrus.Debugf("Adding exposed ports")
- // We need to merge s.Expose into image exposed ports
expose := make(map[uint16]string)
- for k, v := range s.Expose {
- expose[k] = v
- }
if imageData != nil {
- for imgExpose := range imageData.Config.ExposedPorts {
- // Expose format is portNumber[/protocol]
- splitExpose := strings.SplitN(imgExpose, "/", 2)
- num, err := strconv.Atoi(splitExpose[0])
- if err != nil {
- return nil, errors.Wrapf(err, "unable to convert image EXPOSE statement %q to port number", imgExpose)
- }
- if num > 65535 || num < 1 {
- return nil, errors.Errorf("%d from image EXPOSE statement %q is not a valid port number", num, imgExpose)
- }
- // No need to validate protocol, we'll do it below.
- if len(splitExpose) == 1 {
- expose[uint16(num)] = "tcp"
- } else {
- expose[uint16(num)] = splitExpose[1]
- }
+ expose, err = GenExposedPorts(imageData.Config.ExposedPorts)
+ if err != nil {
+ return nil, err
}
}
+ // We need to merge s.Expose into image exposed ports
+ for k, v := range s.Expose {
+ expose[k] = v
+ }
// There's been a request to expose some ports. Let's do that.
// Start by figuring out what needs to be exposed.
// This is a map of container port number to protocols to expose.
@@ -417,3 +404,25 @@ func checkProtocol(protocol string, allowSCTP bool) ([]string, error) {
return finalProto, nil
}
+
+func GenExposedPorts(exposedPorts map[string]struct{}) (map[uint16]string, error) {
+ expose := make(map[uint16]string)
+ for imgExpose := range exposedPorts {
+ // Expose format is portNumber[/protocol]
+ splitExpose := strings.SplitN(imgExpose, "/", 2)
+ num, err := strconv.Atoi(splitExpose[0])
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to convert image EXPOSE statement %q to port number", imgExpose)
+ }
+ if num > 65535 || num < 1 {
+ return nil, errors.Errorf("%d from image EXPOSE statement %q is not a valid port number", num, imgExpose)
+ }
+ // No need to validate protocol, we'll do it below.
+ if len(splitExpose) == 1 {
+ expose[uint16(num)] = "tcp"
+ } else {
+ expose[uint16(num)] = splitExpose[1]
+ }
+ }
+ return expose, nil
+}
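
GenExposedPorts lifts the old inline loop out of createPortMappings so kube.go can reuse it; given an image's ExposedPorts map it yields a port-number to protocol map, defaulting to tcp. Expected behaviour, per the code above:

    exposed, err := GenExposedPorts(map[string]struct{}{
        "80/tcp": {},
        "53/udp": {},
        "8080":   {}, // no protocol given, defaults to "tcp"
    })
    // err == nil
    // exposed == map[uint16]string{80: "tcp", 53: "udp", 8080: "tcp"}
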
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
index f665fc0be..80852930a 100644
--- a/pkg/specgen/namespaces.go
+++ b/pkg/specgen/namespaces.go
@@ -253,7 +253,7 @@ func ParseUserNamespace(ns string) (Namespace, error) {
// ParseNetworkNamespace parses a network namespace specification in string
// form.
// Returns a namespace and (optionally) a list of CNI networks to join.
-func ParseNetworkNamespace(ns string) (Namespace, []string, error) {
+func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, []string, error) {
toReturn := Namespace{}
var cniNetworks []string
// Net defaults to Slirp on rootless
@@ -264,7 +264,11 @@ func ParseNetworkNamespace(ns string) (Namespace, []string, error) {
toReturn.NSMode = FromPod
case ns == "" || ns == string(Default) || ns == string(Private):
if rootless.IsRootless() {
- toReturn.NSMode = Slirp
+ if rootlessDefaultCNI {
+ toReturn.NSMode = Bridge
+ } else {
+ toReturn.NSMode = Slirp
+ }
} else {
toReturn.NSMode = Bridge
}
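
With the extra parameter, the rootless default network mode is chosen by the caller from containers.conf instead of being hardwired to slirp4netns. A usage sketch based on the code above, where cfg stands for the containers.conf config already looked up by the namespaces.go caller:

    rootlessDefaultCNI := cfg.Containers.RootlessNetworking == "cni"
    ns, cniNets, err := specgen.ParseNetworkNamespace("", rootlessDefaultCNI)
    // rootless + rootless_networking="cni"        -> ns.NSMode == specgen.Bridge
    // rootless + any other value (the default)    -> ns.NSMode == specgen.Slirp
    // rootful                                     -> ns.NSMode == specgen.Bridge
    _, _, _ = ns, cniNets, err
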
diff --git a/pkg/systemd/generate/common.go b/pkg/systemd/generate/common.go
index 0f667e2f4..e183125a7 100644
--- a/pkg/systemd/generate/common.go
+++ b/pkg/systemd/generate/common.go
@@ -36,7 +36,7 @@ Description=Podman {{{{.ServiceName}}}}.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor={{{{.GraphRoot}}}} {{{{.RunRoot}}}}
+RequiresMountsFor={{{{.RunRoot}}}}
`
// filterPodFlags removes --pod, --pod-id-file and --infra-conmon-pidfile from the specified command.
@@ -60,7 +60,7 @@ func filterPodFlags(command []string, argCount int) []string {
return processed
}
-// filterCommonContainerFlags removes --conmon-pidfile, --cidfile and --cgroups from the specified command.
+// filterCommonContainerFlags removes --sdnotify, --rm and --cgroups from the specified command.
// argCount is the number of last arguments which should not be filtered, e.g. the container entrypoint.
func filterCommonContainerFlags(command []string, argCount int) []string {
processed := []string{}
@@ -68,11 +68,14 @@ func filterCommonContainerFlags(command []string, argCount int) []string {
s := command[i]
switch {
- case s == "--conmon-pidfile", s == "--cidfile", s == "--cgroups":
+ case s == "--rm":
+ // Boolean flags support --flag and --flag={true,false}.
+ continue
+ case s == "--sdnotify", s == "--cgroups":
i++
continue
- case strings.HasPrefix(s, "--conmon-pidfile="),
- strings.HasPrefix(s, "--cidfile="),
+ case strings.HasPrefix(s, "--sdnotify="),
+ strings.HasPrefix(s, "--rm="),
strings.HasPrefix(s, "--cgroups="):
continue
}
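
filterCommonContainerFlags now keeps --conmon-pidfile and --cidfile (the generated units no longer manage those files) and instead drops --rm, --sdnotify, and --cgroups, treating --rm as a boolean flag that may appear with or without a value. Expected behaviour, matching the updated tests below (argCount is the number of trailing arguments left untouched):

    in := []string{"podman", "run", "--rm", "--sdnotify", "conmon", "--cgroups=no-conmon", "--cidfile", "foo", "alpine", "top"}
    out := filterCommonContainerFlags(in, 2)
    // out == []string{"podman", "run", "--cidfile", "foo", "alpine", "top"}
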
diff --git a/pkg/systemd/generate/common_test.go b/pkg/systemd/generate/common_test.go
index fdcc9d21b..3e2ac015f 100644
--- a/pkg/systemd/generate/common_test.go
+++ b/pkg/systemd/generate/common_test.go
@@ -93,22 +93,22 @@ func TestFilterCommonContainerFlags(t *testing.T) {
},
{
[]string{"podman", "run", "--conmon-pidfile", "foo", "alpine"},
- []string{"podman", "run", "alpine"},
+ []string{"podman", "run", "--conmon-pidfile", "foo", "alpine"},
1,
},
{
[]string{"podman", "run", "--conmon-pidfile=foo", "alpine"},
- []string{"podman", "run", "alpine"},
+ []string{"podman", "run", "--conmon-pidfile=foo", "alpine"},
1,
},
{
[]string{"podman", "run", "--cidfile", "foo", "alpine"},
- []string{"podman", "run", "alpine"},
+ []string{"podman", "run", "--cidfile", "foo", "alpine"},
1,
},
{
[]string{"podman", "run", "--cidfile=foo", "alpine"},
- []string{"podman", "run", "alpine"},
+ []string{"podman", "run", "--cidfile=foo", "alpine"},
1,
},
{
@@ -122,25 +122,15 @@ func TestFilterCommonContainerFlags(t *testing.T) {
1,
},
{
- []string{"podman", "run", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo", "alpine"},
+ []string{"podman", "run", "--cgroups=foo", "--rm", "alpine"},
[]string{"podman", "run", "alpine"},
1,
},
{
- []string{"podman", "run", "--cgroups=foo", "--conmon-pidfile=foo", "--cidfile=foo", "alpine"},
- []string{"podman", "run", "alpine"},
- 1,
- },
- {
- []string{"podman", "run", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo", "alpine", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo"},
- []string{"podman", "run", "alpine", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo"},
+ []string{"podman", "run", "--cgroups", "--rm=bogus", "alpine", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo", "--rm"},
+ []string{"podman", "run", "alpine", "--cgroups", "foo", "--conmon-pidfile", "foo", "--cidfile", "foo", "--rm"},
7,
},
- {
- []string{"podman", "run", "--cgroups=foo", "--conmon-pidfile=foo", "--cidfile=foo", "alpine", "--cgroups=foo", "--conmon-pidfile=foo", "--cidfile=foo"},
- []string{"podman", "run", "alpine", "--cgroups=foo", "--conmon-pidfile=foo", "--cidfile=foo"},
- 4,
- },
}
for _, test := range tests {
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index eb1fb67ff..0e6e1b4df 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -25,6 +25,10 @@ type containerInfo struct {
ServiceName string
// Name or ID of the container.
ContainerNameOrID string
+ // Type of the unit.
+ Type string
+ // NotifyAccess of the unit.
+ NotifyAccess string
// StopTimeout sets the timeout Podman waits before killing the container
// during service stop.
StopTimeout uint
@@ -102,10 +106,19 @@ TimeoutStopSec={{{{.TimeoutStopSec}}}}
ExecStartPre={{{{.ExecStartPre}}}}
{{{{- end}}}}
ExecStart={{{{.ExecStart}}}}
+{{{{- if .ExecStop}}}}
ExecStop={{{{.ExecStop}}}}
+{{{{- end}}}}
+{{{{- if .ExecStopPost}}}}
ExecStopPost={{{{.ExecStopPost}}}}
+{{{{- end}}}}
+{{{{- if .PIDFile}}}}
PIDFile={{{{.PIDFile}}}}
-Type=forking
+{{{{- end}}}}
+Type={{{{.Type}}}}
+{{{{- if .NotifyAccess}}}}
+NotifyAccess={{{{.NotifyAccess}}}}
+{{{{- end}}}}
[Install]
WantedBy=multi-user.target default.target
@@ -152,14 +165,14 @@ func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSyste
return nil, errors.Errorf("could not determine storage store for container")
}
- graphRoot := store.GraphRoot()
- if graphRoot == "" {
- return nil, errors.Errorf("could not lookup container's graphroot: got empty string")
- }
-
- runRoot := store.RunRoot()
- if runRoot == "" {
- return nil, errors.Errorf("could not lookup container's runroot: got empty string")
+ var runRoot string
+ if options.New {
+ runRoot = "%t/containers"
+ } else {
+ runRoot = store.RunRoot()
+ if runRoot == "" {
+ return nil, errors.Errorf("could not lookup container's runroot: got empty string")
+ }
}
envs := config.Spec.Process.Env
@@ -172,7 +185,6 @@ func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSyste
StopTimeout: timeout,
GenerateTimestamp: true,
CreateCommand: createCommand,
- GraphRoot: graphRoot,
RunRoot: runRoot,
containerEnv: envs,
}
@@ -209,6 +221,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
info.Executable = executable
}
+ info.Type = "forking"
info.EnvVariable = define.EnvVariable
info.ExecStart = "{{{{.Executable}}}} start {{{{.ContainerNameOrID}}}}"
info.ExecStop = "{{{{.Executable}}}} stop {{{{if (ge .StopTimeout 0)}}}}-t {{{{.StopTimeout}}}}{{{{end}}}} {{{{.ContainerNameOrID}}}}"
@@ -222,8 +235,12 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
// invalid `info.CreateCommand`. Hence, we're doing a best effort unit
// generation and don't try aiming at completeness.
if options.New {
- info.PIDFile = "%t/" + info.ServiceName + ".pid"
- info.ContainerIDFile = "%t/" + info.ServiceName + ".ctr-id"
+ info.Type = "notify"
+ info.NotifyAccess = "all"
+ info.PIDFile = ""
+ info.ContainerIDFile = ""
+ info.ExecStop = ""
+ info.ExecStopPost = ""
// The create command must at least have three arguments:
// /usr/bin/podman run $IMAGE
index := 0
@@ -246,9 +263,9 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
}
startCommand = append(startCommand,
"run",
- "--conmon-pidfile", "{{{{.PIDFile}}}}",
- "--cidfile", "{{{{.ContainerIDFile}}}}",
+ "--sdnotify=conmon",
"--cgroups=no-conmon",
+ "--rm",
)
remainingCmd := info.CreateCommand[index:]
@@ -337,11 +354,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
startCommand = append(startCommand, remainingCmd...)
startCommand = escapeSystemdArguments(startCommand)
-
- info.ExecStartPre = "/bin/rm -f {{{{.PIDFile}}}} {{{{.ContainerIDFile}}}}"
info.ExecStart = strings.Join(startCommand, " ")
- info.ExecStop = "{{{{.Executable}}}} {{{{if .RootFlags}}}}{{{{ .RootFlags}}}} {{{{end}}}}stop --ignore --cidfile {{{{.ContainerIDFile}}}} {{{{if (ge .StopTimeout 0)}}}}-t {{{{.StopTimeout}}}}{{{{end}}}}"
- info.ExecStopPost = "{{{{.Executable}}}} {{{{if .RootFlags}}}}{{{{ .RootFlags}}}} {{{{end}}}}rm --ignore -f --cidfile {{{{.ContainerIDFile}}}}"
}
info.TimeoutStopSec = minTimeoutStopSec + info.StopTimeout
diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go
index 75b08526b..12a8f3004 100644
--- a/pkg/systemd/generate/containers_test.go
+++ b/pkg/systemd/generate/containers_test.go
@@ -48,7 +48,7 @@ Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
@@ -74,7 +74,7 @@ Description=Podman container-foobar.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
@@ -98,7 +98,7 @@ Description=Podman container-foobar.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
BindsTo=a.service b.service c.service pod.service
After=a.service b.service c.service pod.service
@@ -124,18 +124,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman container run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --replace --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN "foo=arg \"with \" space"
-ExecStop=/usr/bin/podman container stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman container rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman container run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN "foo=arg \"with \" space"
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -149,18 +146,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -174,18 +168,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --pod-id-file %t/pod-foobar.pod-id-file --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -199,18 +190,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --replace --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --replace --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -224,18 +212,15 @@ Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id --cgroups=no-conmon -d awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
-PIDFile=%t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -250,20 +235,17 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon ` +
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm ` +
detachparam +
` awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -279,18 +261,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --replace --name test -p 80:80 awesome-image:latest somecmd --detach=false
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test -p 80:80 awesome-image:latest somecmd --detach=false
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -304,18 +283,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman --events-backend none --runroot /root run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d awesome-image:latest
-ExecStop=/usr/bin/podman --events-backend none --runroot /root stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
-ExecStopPost=/usr/bin/podman --events-backend none --runroot /root rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman --events-backend none --runroot /root run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -329,18 +305,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman container run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d awesome-image:latest
-ExecStop=/usr/bin/podman container stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman container rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman container run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -354,18 +327,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --replace --name test --log-driver=journald --log-opt=tag={{.Name}} awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test --log-driver=journald --log-opt=tag={{.Name}} awesome-image:latest
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -379,18 +349,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --replace --name test awesome-image:latest sh -c "kill $$$$ && echo %%\\"
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test awesome-image:latest sh -c "kill $$$$ && echo %%\\"
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -404,18 +371,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo alpine
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo alpine
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -429,18 +393,15 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --pod-id-file %t/pod-foobar.pod-id-file -d awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo --pod-id-file /tmp/pod-foobar.pod-id-file alpine
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo --pod-id-file /tmp/pod-foobar.pod-id-file alpine
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -454,19 +415,16 @@ Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Environment=FOO=abc "BAR=my test" USER=%%a
Restart=always
TimeoutStopSec=70
-ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --env FOO --env=BAR --env=MYENV=2 -e USER awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
-PIDFile=%t/jadda-jadda.pid
-Type=forking
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --env FOO --env=BAR --env=MYENV=2 -e USER awesome-image:latest
+Type=notify
+NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
@@ -929,10 +887,10 @@ WantedBy=multi-user.target default.target
}
got, err := executeContainerTemplate(&test.info, opts)
if (err != nil) != test.wantErr {
- t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
+ t.Errorf("CreateContainerSystemdUnit() %s error = \n%v, wantErr \n%v", test.name, err, test.wantErr)
return
}
- assert.Equal(t, test.want, got)
+ assert.Equal(t, test.want, got, test.name)
})
}
}
diff --git a/pkg/systemd/generate/pods_test.go b/pkg/systemd/generate/pods_test.go
index 0e4d92c50..a11e1e11e 100644
--- a/pkg/systemd/generate/pods_test.go
+++ b/pkg/systemd/generate/pods_test.go
@@ -47,7 +47,7 @@ Description=Podman pod-123abc.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
Requires=container-1.service container-2.service
Before=container-1.service container-2.service
@@ -75,7 +75,7 @@ Description=Podman pod-123abc.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
Requires=container-1.service container-2.service
Before=container-1.service container-2.service
@@ -103,7 +103,7 @@ Description=Podman pod-123abc.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
Requires=container-1.service container-2.service
Before=container-1.service container-2.service
@@ -131,7 +131,7 @@ Description=Podman pod-123abc.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
Requires=container-1.service container-2.service
Before=container-1.service container-2.service
@@ -159,7 +159,7 @@ Description=Podman pod-123abc.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
-RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
+RequiresMountsFor=/var/run/containers/storage
Requires=container-1.service container-2.service
Before=container-1.service container-2.service
diff --git a/rootless.md b/rootless.md
index 9edd5a437..bee5d337b 100644
--- a/rootless.md
+++ b/rootless.md
@@ -29,7 +29,7 @@ can easily fail
* Ubuntu supports non root overlay, but no other Linux distros do.
* Only other supported driver is VFS.
* Cannot use ping out of the box.
- * [(Can be fixed by setting sysctl on host)](https://github.com/containers/podman/blob/master/troubleshooting.md#6-rootless-containers-cannot-ping-hosts)
+ * [(Can be fixed by setting sysctl on host)](https://github.com/containers/podman/blob/master/troubleshooting.md#5-rootless-containers-cannot-ping-hosts)
* Requires new shadow-utils (not found in older RHEL7/CentOS7 distros; should be fixed in the RHEL 7.7 release)
* A few commands do not work.
* mount/unmount (on fuse-overlay)
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index a81210855..ef51757c9 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -341,3 +341,12 @@ t GET containers/$cid/json 200 \
.HostConfig.NanoCpus=500000
t DELETE containers/$cid?v=true 204
+
+# Test Compat Create with default network mode (#10569)
+t POST containers/create Image=$IMAGE HostConfig='{"NetworkMode":"default"}' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 \
+ .HostConfig.NetworkMode="bridge"
+
+t DELETE containers/$cid?v=true 204
diff --git a/test/apiv2/python/rest_api/fixtures/api_testcase.py b/test/apiv2/python/rest_api/fixtures/api_testcase.py
index 8b771774b..155e93928 100644
--- a/test/apiv2/python/rest_api/fixtures/api_testcase.py
+++ b/test/apiv2/python/rest_api/fixtures/api_testcase.py
@@ -49,7 +49,7 @@ class APITestCase(unittest.TestCase):
def setUp(self):
super().setUp()
- APITestCase.podman.run("run", "alpine", "/bin/ls", check=True)
+ APITestCase.podman.run("run", "-d", "alpine", "top", check=True)
def tearDown(self) -> None:
APITestCase.podman.run("pod", "rm", "--all", "--force", check=True)
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_container.py b/test/apiv2/python/rest_api/test_v2_0_0_container.py
index f67013117..b4b3af2df 100644
--- a/test/apiv2/python/rest_api/test_v2_0_0_container.py
+++ b/test/apiv2/python/rest_api/test_v2_0_0_container.py
@@ -12,7 +12,7 @@ class ContainerTestCase(APITestCase):
r = requests.get(self.uri("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = r.json()
- self.assertEqual(len(obj), 0)
+ self.assertEqual(len(obj), 1)
def test_list_all(self):
r = requests.get(self.uri("/containers/json?all=true"))
@@ -36,7 +36,7 @@ class ContainerTestCase(APITestCase):
self.assertId(r.content)
def test_delete(self):
- r = requests.delete(self.uri(self.resolve_container("/containers/{}")))
+ r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true")))
self.assertEqual(r.status_code, 204, r.text)
def test_stop(self):
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 9d0049910..70a1d09ed 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -425,6 +425,106 @@ var _ = Describe("Podman checkpoint", func() {
// Remove exported checkpoint
os.Remove(fileName)
})
+ // This test performs the same steps that are necessary for migrating
+ // a container from one host to another
+ It("podman checkpoint container with export and different compression algorithms", func() {
+ localRunString := getRunString([]string{"--rm", ALPINE, "top"})
+ session := podmanTest.Podman(localRunString)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ cid := session.OutputToString()
+ fileName := "/tmp/checkpoint-" + cid + ".tar"
+
+ // Checkpoint with the default algorithm
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Restore container
+ result = podmanTest.Podman([]string{"container", "restore", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Checkpoint with the zstd algorithm
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "--compress", "zstd"})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Restore container
+ result = podmanTest.Podman([]string{"container", "restore", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Checkpoint with the none algorithm
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "none"})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Restore container
+ result = podmanTest.Podman([]string{"container", "restore", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Checkpoint with the gzip algorithm
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "gzip"})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Restore container
+ result = podmanTest.Podman([]string{"container", "restore", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Checkpoint with the non-existing algorithm
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "non-existing"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(125))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Remove exported checkpoint
+ os.Remove(fileName)
+ })
It("podman checkpoint and restore container with root file-system changes", func() {
// Start the container
@@ -822,4 +922,58 @@ var _ = Describe("Podman checkpoint", func() {
os.Remove(checkpointFileName)
os.Remove(preCheckpointFileName)
})
+
+ It("podman checkpoint and restore container with different port mappings", func() {
+ localRunString := getRunString([]string{"-p", "1234:6379", "--rm", redis})
+ session := podmanTest.Podman(localRunString)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+ fileName := "/tmp/checkpoint-" + cid + ".tar.gz"
+
+ // Open a network connection to the redis server via initial port mapping
+ conn, err := net.Dial("tcp", "localhost:1234")
+ if err != nil {
+ os.Exit(1)
+ }
+ conn.Close()
+
+ // Checkpoint the container
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Restore container with different port mapping
+ result = podmanTest.Podman([]string{"container", "restore", "-p", "1235:6379", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Open a network connection to the redis server via initial port mapping
+ // This should fail
+ conn, err = net.Dial("tcp", "localhost:1234")
+ Expect(err.Error()).To(ContainSubstring("connection refused"))
+ // Open a network connection to the redis server via new port mapping
+ conn, err = net.Dial("tcp", "localhost:1235")
+ if err != nil {
+ os.Exit(1)
+ }
+ conn.Close()
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Remove exported checkpoint
+ os.Remove(fileName)
+ })
})
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 359345096..7ffee961c 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -408,7 +408,14 @@ func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionInteg
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
- return session, session.ExitCode(), session.OutputToString()
+ if session.ExitCode() != 0 {
+ return session, session.ExitCode(), session.OutputToString()
+ }
+ cid := session.OutputToString()
+
+ wsession := p.Podman([]string{"wait", cid})
+ wsession.WaitWithDefaultTimeout()
+ return session, wsession.ExitCode(), cid
}
// RunNginxWithHealthCheck runs the alpine nginx container with an optional name and adds a healthcheck into it
@@ -431,7 +438,14 @@ func (p *PodmanTestIntegration) RunLsContainerInPod(name, pod string) (*PodmanSe
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
- return session, session.ExitCode(), session.OutputToString()
+ if session.ExitCode() != 0 {
+ return session, session.ExitCode(), session.OutputToString()
+ }
+ cid := session.OutputToString()
+
+ wsession := p.Podman([]string{"wait", cid})
+ wsession.WaitWithDefaultTimeout()
+ return session, wsession.ExitCode(), cid
}
// BuildImage uses podman build and buildah to build an image
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index 4dbbe9dd8..cc7c4d996 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ "github.com/containers/podman/v3/libpod/events"
. "github.com/containers/podman/v3/test/utils"
"github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo"
@@ -134,12 +135,10 @@ var _ = Describe("Podman events", func() {
jsonArr := test.OutputToStringArray()
Expect(test.OutputToStringArray()).ShouldNot(BeEmpty())
- eventsMap := make(map[string]string)
- err := json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
+ event := events.Event{}
+ err := json.Unmarshal([]byte(jsonArr[0]), &event)
Expect(err).ToNot(HaveOccurred())
- Expect(eventsMap).To(HaveKey("Status"))
-
test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"})
test.WaitWithDefaultTimeout()
Expect(test).To(Exit(0))
@@ -147,11 +146,9 @@ var _ = Describe("Podman events", func() {
jsonArr = test.OutputToStringArray()
Expect(test.OutputToStringArray()).ShouldNot(BeEmpty())
- eventsMap = make(map[string]string)
- err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
+ event = events.Event{}
+ err = json.Unmarshal([]byte(jsonArr[0]), &event)
Expect(err).ToNot(HaveOccurred())
-
- Expect(eventsMap).To(HaveKey("Status"))
})
It("podman events --until future", func() {
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index 75d778f10..e03d6899e 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -215,7 +215,6 @@ var _ = Describe("Podman generate systemd", func() {
// Grepping the output (in addition to unit tests)
Expect(session.OutputToString()).To(ContainSubstring("# container-foo.service"))
Expect(session.OutputToString()).To(ContainSubstring(" --replace "))
- Expect(session.OutputToString()).To(ContainSubstring(" stop --ignore --cidfile %t/container-foo.ctr-id -t 42"))
if !IsRemote() {
// The podman commands in the unit should contain the root flags if generate systemd --new is used
Expect(session.OutputToString()).To(ContainSubstring(" --runroot"))
@@ -234,7 +233,6 @@ var _ = Describe("Podman generate systemd", func() {
// Grepping the output (in addition to unit tests)
Expect(session.OutputToString()).To(ContainSubstring("# container-foo.service"))
Expect(session.OutputToString()).To(ContainSubstring(" --replace "))
- Expect(session.OutputToString()).To(ContainSubstring(" stop --ignore --cidfile %t/container-foo.ctr-id -t 42"))
})
It("podman generate systemd --new without explicit detaching param", func() {
@@ -247,7 +245,7 @@ var _ = Describe("Podman generate systemd", func() {
Expect(session.ExitCode()).To(Equal(0))
// Grepping the output (in addition to unit tests)
- Expect(session.OutputToString()).To(ContainSubstring("--cgroups=no-conmon -d"))
+ Expect(session.OutputToString()).To(ContainSubstring(" -d "))
})
It("podman generate systemd --new with explicit detaching param in middle", func() {
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 3051031a5..4d9cbb48b 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -163,7 +163,7 @@ var _ = Describe("Podman logs", func() {
})
It("podman logs on a created container should result in 0 exit code: "+log, func() {
- session := podmanTest.Podman([]string{"create", "-t", "--name", "log", ALPINE})
+ session := podmanTest.Podman([]string{"create", "--log-driver", log, "-t", "--name", "log", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session).To(Exit(0))
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index 6f28d7e19..a7e61932e 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -533,7 +533,11 @@ var _ = Describe("Podman network", func() {
out, err := inspect.jq(".[0].plugins[0].master")
Expect(err).To(BeNil())
- Expect(out).To(Equal("\"lo\""))
+ Expect(out).To(Equal(`"lo"`))
+
+ ipamType, err := inspect.jq(".[0].plugins[0].ipam.type")
+ Expect(err).To(BeNil())
+ Expect(ipamType).To(Equal(`"dhcp"`))
nc = podmanTest.Podman([]string{"network", "rm", net})
nc.WaitWithDefaultTimeout()
@@ -571,13 +575,29 @@ var _ = Describe("Podman network", func() {
Expect(err).To(BeNil())
Expect(mtu).To(Equal("1500"))
+ name, err := inspect.jq(".[0].plugins[0].type")
+ Expect(err).To(BeNil())
+ Expect(name).To(Equal(`"macvlan"`))
+
+ netInt, err := inspect.jq(".[0].plugins[0].master")
+ Expect(err).To(BeNil())
+ Expect(netInt).To(Equal(`"lo"`))
+
+ ipamType, err := inspect.jq(".[0].plugins[0].ipam.type")
+ Expect(err).To(BeNil())
+ Expect(ipamType).To(Equal(`"host-local"`))
+
gw, err := inspect.jq(".[0].plugins[0].ipam.ranges[0][0].gateway")
Expect(err).To(BeNil())
- Expect(gw).To(Equal("\"192.168.1.254\""))
+ Expect(gw).To(Equal(`"192.168.1.254"`))
subnet, err := inspect.jq(".[0].plugins[0].ipam.ranges[0][0].subnet")
Expect(err).To(BeNil())
- Expect(subnet).To(Equal("\"192.168.1.0/24\""))
+ Expect(subnet).To(Equal(`"192.168.1.0/24"`))
+
+ routes, err := inspect.jq(".[0].plugins[0].ipam.routes[0].dst")
+ Expect(err).To(BeNil())
+ Expect(routes).To(Equal(`"0.0.0.0/0"`))
nc = podmanTest.Podman([]string{"network", "rm", net})
nc.WaitWithDefaultTimeout()
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index e0af27f7a..833991452 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -2119,7 +2119,7 @@ MemoryReservation: {{ .HostConfig.MemoryReservation }}`})
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(125))
- Expect(kube.ErrorToString()).To(ContainSubstring(invalidImageName))
+ Expect(kube.ErrorToString()).To(ContainSubstring("invalid reference format"))
})
It("podman play kube applies log driver to containers", func() {
diff --git a/test/e2e/run_device_test.go b/test/e2e/run_device_test.go
index 3137e3fe4..735e44d3e 100644
--- a/test/e2e/run_device_test.go
+++ b/test/e2e/run_device_test.go
@@ -113,4 +113,10 @@ var _ = Describe("Podman run device", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(Equal("/dev/kmsg1"))
})
+
+ It("podman run --gpus noop", func() {
+ session := podmanTest.Podman([]string{"run", "--gpus", "all", ALPINE, "ls", "/"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 37e837b1d..696cec76c 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -786,4 +786,18 @@ var _ = Describe("Podman run networking", func() {
Expect(session.ExitCode()).To(BeZero())
Expect(session.OutputToString()).To(ContainSubstring("search dns.podman"))
})
+
+ It("Rootless podman run with --net=bridge works and connects to default network", func() {
+ // This is harmless when run as root, so we'll just let it run.
+ ctrName := "testctr"
+ ctr := podmanTest.Podman([]string{"run", "-d", "--net=bridge", "--name", ctrName, ALPINE, "top"})
+ ctr.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(BeZero())
+
+ inspectOut := podmanTest.InspectContainer(ctrName)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Networks)).To(Equal(1))
+ _, ok := inspectOut[0].NetworkSettings.Networks["podman"]
+ Expect(ok).To(BeTrue())
+ })
})
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index 97ef61511..963c89281 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -49,6 +49,14 @@ function setup() {
@test "podman can pull an image" {
run_podman pull $IMAGE
+
+ # Also make sure that the tag@digest syntax is supported.
+ run_podman inspect --format "{{ .Digest }}" $IMAGE
+ digest=$output
+ run_podman pull $IMAGE@$digest
+
+ # Now untag the digest reference again.
+ run_podman untag $IMAGE $IMAGE@$digest
}
# PR #7212: allow --remote anywhere before subcommand, not just as 1st flag
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 2ea981a85..32fc85c4e 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -600,12 +600,12 @@ json-file | f
echo "$randomcontent" > $testdir/content
# Workdir does not exist on the image but is volume mounted.
- run_podman run --rm --workdir /IamNotOnTheImage -v $testdir:/IamNotOnTheImage $IMAGE cat content
+ run_podman run --rm --workdir /IamNotOnTheImage -v $testdir:/IamNotOnTheImage:Z $IMAGE cat content
is "$output" "$randomcontent" "cat random content"
# Workdir does not exist on the image but is created by the runtime as it's
# a subdir of a volume.
- run_podman run --rm --workdir /IamNotOntheImage -v $testdir/content:/IamNotOntheImage/foo $IMAGE cat foo
+ run_podman run --rm --workdir /IamNotOntheImage -v $testdir/content:/IamNotOntheImage/foo:Z $IMAGE cat foo
is "$output" "$randomcontent" "cat random content"
# Make sure that running on a read-only rootfs works (#9230).
@@ -702,6 +702,8 @@ EOF
run_podman build -t nomtab $tmpdir
run_podman run --rm nomtab stat -c %N /etc/mtab
is "$output" "$expected" "/etc/mtab should be created"
+
+ run_podman rmi nomtab
}
# vim: filetype=sh
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index 3dd88e5eb..ccf83df14 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -73,4 +73,56 @@ ${cid[0]} d" "Sequential output from logs"
_log_test_multi journald
}
+@test "podman logs - journald log driver requires journald events backend" {
+ skip_if_remote "remote does not support --events-backend"
+ # We can't use journald on RHEL as rootless: rhbz#1895105
+ skip_if_journald_unavailable
+
+ run_podman --events-backend=file run --log-driver=journald -d --name test --replace $IMAGE ls /
+ run_podman --events-backend=file logs test
+ run_podman 125 --events-backend=file logs --follow test
+ is "$output" "Error: using --follow with the journald --log-driver but without the journald --events-backend (file) is not supported" "journald logger requires journald eventer"
+}
+
+function _log_test_since() {
+ local driver=$1
+
+ s_before="before_$(random_string)_${driver}"
+ s_after="after_$(random_string)_${driver}"
+
+ before=$(date --iso-8601=seconds)
+ run_podman run --log-driver=$driver -d --name test $IMAGE sh -c \
+ "echo $s_before; trap 'echo $s_after; exit' SIGTERM; while :; do sleep 1; done"
+
+ # sleep a second to make sure the date is after the first echo
+ sleep 1
+ after=$(date --iso-8601=seconds)
+ run_podman stop test
+
+ run_podman logs test
+ is "$output" \
+ "$s_before
+$s_after"
+
+ run_podman logs --since $before test
+ is "$output" \
+ "$s_before
+$s_after"
+
+ run_podman logs --since $after test
+ is "$output" "$s_after"
+ run_podman rm -f test
+}
+
+@test "podman logs - since k8s-file" {
+ _log_test_since k8s-file
+}
+
+@test "podman logs - since journald" {
+ # We can't use journald on RHEL as rootless: rhbz#1895105
+ skip_if_journald_unavailable
+
+ _log_test_since journald
+}
+
# vim: filetype=sh
diff --git a/test/system/045-start.bats b/test/system/045-start.bats
index 542f9d1c2..3e0118dba 100644
--- a/test/system/045-start.bats
+++ b/test/system/045-start.bats
@@ -25,6 +25,8 @@ load helpers
die "podman start --all restarted a running container"
fi
+ run_podman wait $cid_none_implicit $cid_none_explicit $cid_on_failure
+
run_podman rm $cid_none_implicit $cid_none_explicit $cid_on_failure
run_podman stop -t 1 $cid_always
run_podman rm $cid_always
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 9e1559013..0f3f3fa7f 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -768,19 +768,27 @@ EOF
@test "podman build COPY hardlinks " {
tmpdir=$PODMAN_TMPDIR/build-test
- mkdir -p $tmpdir
+ subdir=$tmpdir/subdir
+ subsubdir=$subdir/subsubdir
+ mkdir -p $subsubdir
dockerfile=$tmpdir/Dockerfile
cat >$dockerfile <<EOF
FROM $IMAGE
COPY . /test
EOF
- ln $dockerfile $tmpdir/hardlink
+ ln $dockerfile $tmpdir/hardlink1
+ ln $dockerfile $subdir/hardlink2
+ ln $dockerfile $subsubdir/hardlink3
run_podman build -t build_test $tmpdir
run_podman run --rm build_test stat -c '%i' /test/Dockerfile
dinode=$output
- run_podman run --rm build_test stat -c '%i' /test/hardlink
+ run_podman run --rm build_test stat -c '%i' /test/hardlink1
+ is "$output" "$dinode" "COPY hardlinks work"
+ run_podman run --rm build_test stat -c '%i' /test/subdir/hardlink2
+ is "$output" "$dinode" "COPY hardlinks work"
+ run_podman run --rm build_test stat -c '%i' /test/subdir/subsubdir/hardlink3
is "$output" "$dinode" "COPY hardlinks work"
run_podman rmi -f build_test
diff --git a/test/system/090-events.bats b/test/system/090-events.bats
index 19bee5506..d889bd7f9 100644
--- a/test/system/090-events.bats
+++ b/test/system/090-events.bats
@@ -6,7 +6,6 @@
load helpers
@test "events with a filter by label" {
- skip_if_remote "FIXME: -remote does not include labels in event output"
cname=test-$(random_string 30 | tr A-Z a-z)
labelname=$(random_string 10)
labelvalue=$(random_string 15)
@@ -27,7 +26,7 @@ load helpers
}
@test "image events" {
- skip_if_remote "FIXME: remove events on podman-remote seem to be broken"
+ skip_if_remote "remote does not support --events-backend"
pushedDir=$PODMAN_TMPDIR/dir
mkdir -p $pushedDir
@@ -61,3 +60,30 @@ load helpers
.*image remove $imageID $tag.*" \
"podman events"
}
+
+function _events_disjunctive_filters() {
+ local backend=$1
+
+ # Regression test for #10507: make sure that filters with the same key are
+ # applied in disjunction.
+ t0=$(date --iso-8601=seconds)
+ run_podman $backend run --name foo --rm $IMAGE ls
+ run_podman $backend run --name bar --rm $IMAGE ls
+ run_podman $backend events --stream=false --since=$t0 --filter container=foo --filter container=bar --filter event=start
+ is "$output" ".* container start .* name=foo.*
+.* container start .* name=bar.*"
+}
+
+@test "events with disjunctive filters - file" {
+ skip_if_remote "remote does not support --events-backend"
+ _events_disjunctive_filters --events-backend=file
+}
+
+@test "events with disjunctive filters - journald" {
+ skip_if_remote "remote does not support --events-backend"
+ _events_disjunctive_filters --events-backend=journald
+}
+
+@test "events with disjunctive filters - default" {
+ _events_disjunctive_filters ""
+}
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index 1b02b4976..3770eac27 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -8,8 +8,7 @@ load helpers
@test "podman kill - test signal handling in containers" {
# Start a container that will handle all signals by emitting 'got: N'
local -a signals=(1 2 3 4 5 6 8 10 12 13 14 15 16 20 21 22 23 24 25 26 64)
- # Force the k8s-file driver until #10323 is fixed.
- run_podman run --log-driver=k8s-file -d $IMAGE sh -c \
+ run_podman run -d $IMAGE sh -c \
"for i in ${signals[*]}; do trap \"echo got: \$i\" \$i; done;
echo READY;
while ! test -e /stop; do sleep 0.05; done;
diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats
new file mode 100644
index 000000000..9bfb44791
--- /dev/null
+++ b/test/system/255-auto-update.bats
@@ -0,0 +1,274 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# Tests for automatically updating images for containerized services
+#
+
+load helpers
+
+UNIT_DIR="/usr/lib/systemd/system"
+SNAME_FILE=$BATS_TMPDIR/services
+
+function setup() {
+ skip_if_remote "systemd tests are meaningless over remote"
+ skip_if_rootless
+
+ basic_setup
+}
+
+function teardown() {
+ while read line; do
+ if [[ "$line" =~ "podman-auto-update" ]]; then
+ echo "Stop timer: $line.timer"
+ systemctl stop $line.timer
+ systemctl disable $line.timer
+ else
+ systemctl stop $line
+ fi
+ rm -f $UNIT_DIR/$line.{service,timer}
+ done < $SNAME_FILE
+
+ rm -f $SNAME_FILE
+ run_podman ? rmi quay.io/libpod/alpine:latest
+ run_podman ? rmi quay.io/libpod/alpine_nginx:latest
+ run_podman ? rmi quay.io/libpod/localtest:latest
+ basic_teardown
+}
+
+# This function handles the basic steps shared by the auto-update related
+# tests. It performs the following steps:
+# 1. Generate a random container name and echo it to output.
+# 2. Tag the fake image before the test
+# 3. Start a container with io.containers.autoupdate
+# 4. Generate the service file from the container
+# 5. Remove the original container
+# 6. Start the container from the service
+function generate_service() {
+ local target_img_basename=$1
+ local autoupdate=$2
+
+ # Container name. Include the autoupdate type, to make debugging easier.
+ # IMPORTANT: variable 'cname' is passed (out of scope) up to caller!
+ cname=c_${autoupdate//\'/}_$(random_string)
+ target_img="quay.io/libpod/$target_img_basename:latest"
+ run_podman tag $IMAGE $target_img
+ if [[ -n "$autoupdate" ]]; then
+ label="--label io.containers.autoupdate=$autoupdate"
+ else
+ label=""
+ fi
+ run_podman run -d --name $cname $label $target_img top -d 120
+
+ run_podman generate systemd --new $cname
+ echo "$output" > "$UNIT_DIR/container-$cname.service"
+ echo "container-$cname" >> $SNAME_FILE
+ run_podman rm -f $cname
+
+ systemctl daemon-reload
+ systemctl start container-$cname
+ systemctl status container-$cname
+
+ # Original image ID.
+ # IMPORTANT: variable 'ori_image' is passed (out of scope) up to caller!
+ run_podman inspect --format "{{.Image}}" $cname
+ ori_image=$output
+}
+
+function _wait_service_ready() {
+ local sname=$1
+
+ local timeout=6
+ while [[ $timeout -gt 1 ]]; do
+ if systemctl -q is-active $sname; then
+ return
+ fi
+ sleep 1
+ let timeout=$timeout-1
+ done
+
+ # Print service status as debug information before failing the test case
+ systemctl status $sname
+ die "Timed out waiting for $sname to start"
+}
+
+# Wait for container to update, as confirmed by its image ID changing
+function _confirm_update() {
+ local cname=$1
+ local old_iid=$2
+
+ # Image has already been pulled, so this shouldn't take too long
+ local timeout=5
+ while [[ $timeout -gt 0 ]]; do
+ run_podman '?' inspect --format "{{.Image}}" $cname
+ if [[ $status != 0 ]]; then
+ if [[ $output =~ (no such object|does not exist in database): ]]; then
+ # this is ok, it just means the container is being restarted
+ :
+ else
+ die "podman inspect $cname failed unexpectedly"
+ fi
+ elif [[ $output != $old_iid ]]; then
+ return
+ fi
+ sleep 1
+ done
+
+ die "Timed out waiting for $cname to update; old IID=$old_iid"
+}
+
+# This test can fail in a dev environment because of SELinux.
+# quick fix: chcon -t container_runtime_exec_t ./bin/podman
+@test "podman auto-update - label io.containers.autoupdate=image" {
+ generate_service alpine image
+
+ _wait_service_ready container-$cname.service
+ run_podman auto-update
+ is "$output" "Trying to pull.*" "Image is updated."
+ _confirm_update $cname $ori_image
+}
+
+@test "podman auto-update - label io.containers.autoupdate=disabled" {
+ generate_service alpine disabled
+
+ _wait_service_ready container-$cname.service
+ run_podman auto-update
+ is "$output" "" "Image is not updated when autoupdate=disabled."
+
+ run_podman inspect --format "{{.Image}}" $cname
+ is "$output" "$ori_image" "Image ID should not change"
+}
+
+@test "podman auto-update - label io.containers.autoupdate=fakevalue" {
+ fakevalue=fake_$(random_string)
+ generate_service alpine $fakevalue
+
+ _wait_service_ready container-$cname.service
+ run_podman 125 auto-update
+ is "$output" ".*invalid auto-update policy.*" "invalid policy setup"
+
+ run_podman inspect --format "{{.Image}}" $cname
+ is "$output" "$ori_image" "Image ID should not change"
+}
+
+@test "podman auto-update - label io.containers.autoupdate=local" {
+ generate_service localtest local
+ podman commit --change CMD=/bin/bash $cname quay.io/libpod/localtest:latest
+
+ _wait_service_ready container-$cname.service
+ run_podman auto-update
+ _confirm_update $cname $ori_image
+}
+
+@test "podman auto-update with multiple services" {
+ # Preserve original image ID, to confirm that it changes (or not)
+ run_podman inspect --format "{{.Id}}" $IMAGE
+ local img_id="$output"
+
+ local cnames=()
+ local -A expect_update
+ local -A will_update=([image]=1 [registry]=1 [local]=1)
+
+ local fakevalue=fake_$(random_string)
+ for auto_update in image registry "" disabled "''" $fakevalue local
+ do
+ local img_base="alpine"
+ if [[ $auto_update == "registry" ]]; then
+ img_base="alpine_nginx"
+ elif [[ $auto_update == "local" ]]; then
+ img_base="localtest"
+ fi
+ generate_service $img_base $auto_update
+ cnames+=($cname)
+ if [[ $auto_update == "local" ]]; then
+ local_cname=$cname
+ fi
+
+ if [[ -n "$auto_update" && -n "${will_update[$auto_update]}" ]]; then
+ expect_update[$cname]=1
+ fi
+ done
+
+ # Only check that the last service is started. Previous services should already be active.
+ _wait_service_ready container-$cname.service
+ run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest
+ # Exit code is expected, due to invalid 'fakevalue'
+ run_podman 125 auto-update
+ update_log=$output
+ is "$update_log" ".*invalid auto-update policy.*" "invalid policy setup"
+ is "$update_log" ".*1 error occurred.*" "invalid policy setup"
+
+ local n_updated=$(grep -c 'Trying to pull' <<<"$update_log")
+ is "$n_updated" "2" "Number of images updated from registry."
+
+ for cname in "${!expect_update[@]}"; do
+ is "$update_log" ".*$cname.*" "container with auto-update policy image updated"
+ # Just because podman says it fetched, doesn't mean it actually updated
+ _confirm_update $cname $img_id
+ done
+
+ # Final confirmation that all image IDs have/haven't changed
+ for cname in "${cnames[@]}"; do
+ run_podman inspect --format "{{.Image}}" $cname
+ if [[ -n "${expect_update[$cname]}" ]]; then
+ if [[ "$output" == "$img_id" ]]; then
+ die "$cname: image ID ($output) did not change"
+ fi
+ else
+ is "$output" "$img_id" "Image should not be changed."
+ fi
+ done
+}
+
+@test "podman auto-update using systemd" {
+ generate_service alpine image
+
+ cat >$UNIT_DIR/podman-auto-update-$cname.timer <<EOF
+[Unit]
+Description=Podman auto-update testing timer
+
+[Timer]
+OnCalendar=*-*-* *:*:0/2
+Persistent=true
+
+[Install]
+WantedBy=timers.target
+EOF
+ cat >$UNIT_DIR/podman-auto-update-$cname.service <<EOF
+[Unit]
+Description=Podman auto-update testing service
+Documentation=man:podman-auto-update(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/podman auto-update
+
+[Install]
+WantedBy=multi-user.target default.target
+EOF
+
+ echo "podman-auto-update-$cname" >> $SNAME_FILE
+ systemctl enable --now podman-auto-update-$cname.timer
+ systemctl list-timers --all
+
+ local expect='Finished Podman auto-update testing service'
+ local failed_start=failed
+ local count=0
+ while [ $count -lt 120 ]; do
+ run journalctl -n 15 -u podman-auto-update-$cname.service
+ if [[ "$output" =~ $expect ]]; then
+ failed_start=
+ break
+ fi
+ ((count+=1))
+ sleep 1
+ done
+
+ if [[ -n "$failed_start" ]]; then
+ die "Did not find expected string '$expect' in journalctl output for $cname"
+ fi
+
+ _confirm_update $cname $ori_image
+}
+
+# vim: filetype=sh
diff --git a/test/system/450-interactive.bats b/test/system/450-interactive.bats
index a9bf52ee8..a2db39492 100644
--- a/test/system/450-interactive.bats
+++ b/test/system/450-interactive.bats
@@ -56,8 +56,7 @@ function teardown() {
stty rows $rows cols $cols <$PODMAN_TEST_PTY
# ...and make sure stty under podman reads that.
- # FIXME: 'sleep 1' is needed for podman-remote; without it, there's
- run_podman run -it --name mystty $IMAGE sh -c 'sleep 1;stty size' <$PODMAN_TEST_PTY
+ run_podman run -it --name mystty $IMAGE stty size <$PODMAN_TEST_PTY
is "$output" "$rows $cols" "stty under podman reads the correct dimensions"
}
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index 1cec50827..55ec80bb2 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -34,7 +34,7 @@ load helpers
# Bind-mount this file with a different name to a container running httpd
run_podman run -d --name myweb -p "$HOST_PORT:80" \
--restart always \
- -v $INDEX1:/var/www/index.txt \
+ -v $INDEX1:/var/www/index.txt:Z \
-w /var/www \
$IMAGE /bin/busybox-extras httpd -f -p 80
cid=$output
@@ -257,7 +257,7 @@ load helpers
# Bind-mount this file with a different name to a container running httpd
run_podman run -d --name myweb -p "$HOST_PORT:80" \
--network $netname \
- -v $INDEX1:/var/www/index.txt \
+ -v $INDEX1:/var/www/index.txt:Z \
-w /var/www \
$IMAGE /bin/busybox-extras httpd -f -p 80
cid=$output
@@ -329,4 +329,62 @@ load helpers
run_podman network rm -f $mynetname
}
+@test "podman ipv6 in /etc/resolv.conf" {
+ ipv6_regex='([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?'
+
+ # Make sure to read the correct /etc/resolv.conf file in case of systemd-resolved.
+ resolve_file=$(readlink -f /etc/resolv.conf)
+ if [[ "$resolve_file" == "/run/systemd/resolve/stub-resolv.conf" ]]; then
+ resolve_file="/run/systemd/resolve/resolv.conf"
+ fi
+
+ # If the host doesn't have an ipv6 in resolv.conf skip this test.
+ # We should never modify resolv.conf on the host.
+ if ! grep -E "$ipv6_regex" "$resolve_file"; then
+ skip "This test needs an ipv6 nameserver in $resolve_file"
+ fi
+
+ # ipv4 slirp
+ run_podman run --rm --network slirp4netns:enable_ipv6=false $IMAGE cat /etc/resolv.conf
+ if grep -E "$ipv6_regex" <<< $output; then
+ die "resolv.conf contains a ipv6 nameserver"
+ fi
+
+ # ipv6 slirp
+ run_podman run --rm --network slirp4netns:enable_ipv6=true $IMAGE cat /etc/resolv.conf
+ # "is" does not like the ipv6 regex
+ if ! grep -E "$ipv6_regex" <<< $output; then
+ die "resolv.conf does not contain a ipv6 nameserver"
+ fi
+
+ # ipv4 cni
+ local mysubnet=$(random_rfc1918_subnet)
+ local netname=testnet-$(random_string 10)
+
+ run_podman network create --subnet $mysubnet.0/24 $netname
+ is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'"
+
+ run_podman run --rm --network $netname $IMAGE cat /etc/resolv.conf
+ if grep -E "$ipv6_regex" <<< $output; then
+ die "resolv.conf contains a ipv6 nameserver"
+ fi
+
+ run_podman network rm -f $netname
+
+ # ipv6 cni
+ mysubnet=fd00:4:4:4:4::/64
+ netname=testnet-$(random_string 10)
+
+ run_podman network create --subnet $mysubnet $netname
+ is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'"
+
+ run_podman run --rm --network $netname $IMAGE cat /etc/resolv.conf
+ # "is" does not like the ipv6 regex
+ if ! grep -E "$ipv6_regex" <<< $output; then
+ die "resolv.conf does not contain a ipv6 nameserver"
+ fi
+
+ run_podman network rm -f $netname
+}
+
# vim: filetype=sh
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index bcd8cf939..15f3e240a 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -88,3 +88,44 @@ RELABEL="system_u:object_r:container_file_t:s0"
fi
run_podman pod rm -f test_pod
}
+
+@test "podman play with user from image" {
+ TESTDIR=$PODMAN_TMPDIR/testdir
+ mkdir -p $TESTDIR
+
+testUserYaml="
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: test
+ name: test_pod
+spec:
+ containers:
+ - command:
+ - id
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: container
+ value: podman
+ image: userimage
+ name: test
+ resources: {}
+status: {}
+"
+
+cat > $PODMAN_TMPDIR/Containerfile << _EOF
+from $IMAGE
+USER bin
+_EOF
+
+ echo "$testUserYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
+ run_podman build -t userimage $PODMAN_TMPDIR
+ run_podman play kube --start=false $PODMAN_TMPDIR/test.yaml
+ run_podman inspect --format "{{ .Config.User }}" test_pod-test
+ is "$output" bin "expect container within pod to run as the bin user"
+ run_podman pod rm -f test_pod
+}
diff --git a/transfer.md b/transfer.md
index 8ead43f63..c37592384 100644
--- a/transfer.md
+++ b/transfer.md
@@ -9,7 +9,7 @@ This document outlines useful information for ops and dev transfer as it relates
Podman is a tool for managing Pods, Containers, and Container Images. The CLI
for Podman is based on the Docker CLI, although Podman does not require a
-runtime daemon to be running in order to function.
+runtime daemon to be running in order to function. Podman also supports the Docker API via the socket-activated Podman system service.
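A minimal sketch of exercising that Docker-compatible API, assuming the packaged `podman.socket` systemd unit (rootless setups use `systemctl --user` and `$XDG_RUNTIME_DIR/podman/podman.sock` instead):

```bash
# Enable the Podman API socket (rootful).
sudo systemctl enable --now podman.socket

# Point an existing Docker client at the Podman socket.
export DOCKER_HOST=unix:///run/podman/podman.sock
docker version   # answered by Podman's compatibility API
```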
## System Tools
@@ -21,81 +21,94 @@ As well as some systemd helpers like `systemd-cgls` and `systemd-cgtop` are stil
For many troubleshooting and information collection steps, there may be an existing pattern.
Following provides equivalent with `Podman` tools for gathering information or jumping into containers, for operational use.
-| Existing Step | `Podman` (and friends) |
-| :--- | :--- |
-| `docker run` | [`podman run`](./docs/source/markdown/podman-run.1.md) |
-| `docker exec` | [`podman exec`](./docs/source/markdown/podman-exec.1.md) |
-| `docker info` | [`podman info`](./docs/source/markdown/podman-info.1.md) |
-| `docker inspect` | [`podman inspect`](./docs/source/markdown/podman-inspect.1.md) |
-| `docker logs` | [`podman logs`](./docs/source/markdown/podman-logs.1.md) |
-| `docker ps` | [`podman ps`](./docs/source/markdown/podman-ps.1.md) |
-| `docker stats`| [`podman stats`](./docs/source/markdown/podman-stats.1.md)|
-
## Development Transfer
There are other equivalents for these tools
| Existing Step | `Podman` (and friends) |
| :--- | :--- |
+| `docker ` | [`podman`](./docs/source/markdown/podman.1.md) |
| `docker attach` | [`podman attach`](./docs/source/markdown/podman-attach.1.md) |
+| `docker auto-update`|[`podman auto-update`](./docs/source/markdown/podman-auto-update.1.md)|
| `docker build` | [`podman build`](./docs/source/markdown/podman-build.1.md) |
| `docker commit` | [`podman commit`](./docs/source/markdown/podman-commit.1.md) |
-| `docker container`|[`podman container`](./docs/source/markdown/podman-container.1.md) |
+| `docker container `|[`podman container`](./docs/source/markdown/podman-container.1.md) |
+| `docker container prune`|[`podman container prune`](./docs/source/markdown/podman-container-prune.1.md) |
| `docker cp` | [`podman cp`](./docs/source/markdown/podman-cp.1.md) |
| `docker create` | [`podman create`](./docs/source/markdown/podman-create.1.md) |
| `docker diff` | [`podman diff`](./docs/source/markdown/podman-diff.1.md) |
| `docker events` | [`podman events`](./docs/source/markdown/podman-events.1.md) |
+| `docker exec` | [`podman exec`](./docs/source/markdown/podman-exec.1.md) |
| `docker export` | [`podman export`](./docs/source/markdown/podman-export.1.md) |
| `docker history` | [`podman history`](./docs/source/markdown/podman-history.1.md) |
-| `docker image` | [`podman image`](./docs/source/markdown/podman-image.1.md) |
+| `docker image` | [`podman image`](./docs/source/markdown/podman-image.1.md) |
| `docker images` | [`podman images`](./docs/source/markdown/podman-images.1.md) |
| `docker import` | [`podman import`](./docs/source/markdown/podman-import.1.md) |
+| `docker info` | [`podman info`](./docs/source/markdown/podman-info.1.md) |
+| `docker inspect` | [`podman inspect`](./docs/source/markdown/podman-inspect.1.md) |
| `docker kill` | [`podman kill`](./docs/source/markdown/podman-kill.1.md) |
| `docker load` | [`podman load`](./docs/source/markdown/podman-load.1.md) |
| `docker login` | [`podman login`](./docs/source/markdown/podman-login.1.md) |
| `docker logout` | [`podman logout`](./docs/source/markdown/podman-logout.1.md) |
-| `docker network create` | [`podman network create`](./docs/source/markdown/podman-network-create.1.md) |
-| `docker network inspect` | [`podman network inspect`](./docs/source/markdown/podman-network-inspect.1.md) |
-| `docker network ls` | [`podman network ls`](./docs/source/markdown/podman-network-ls.1.md) |
-| `docker network rm` | [`podman network rm`](./docs.source/markdown/podman-network-rm.1.md) |
+| `docker logs` | [`podman logs`](./docs/source/markdown/podman-logs.1.md) |
+| `docker manifest ` | [`podman manifest`](./docs/source/markdown/podman-manifest.1.md) |
+| `docker manifest annotate` | [`podman manifest annotate`](./docs/source/markdown/podman-manifest-annotate.1.md) |
+| `docker manifest create` | [`podman manifest create`](./docs/source/markdown/podman-manifest-create.1.md) |
+| `docker manifest inspect`| [`podman manifest inspect`](./docs/source/markdown/podman-manifest-inspect.1.md) |
+| `docker manifest push` | [`podman manifest push`](./docs/source/markdown/podman-manifest-push.1.md) |
+| `docker manifest rm` | [`podman manifest rm`](./docs/source/markdown/podman-manifest-rm.1.md) |
+| `docker network ` | [`podman network`](./docs/source/markdown/podman-network.1.md) |
+| `docker network connect` | [`podman network connect`](./docs/source/markdown/podman-network-connect.1.md) |
+| `docker network create` | [`podman network create`](./docs/source/markdown/podman-network-create.1.md) |
+| `docker network disconnect`| [`podman network disconnect`](./docs/source/markdown/podman-network-disconnect.1.md)|
+| `docker network inspect` | [`podman network inspect`](./docs/source/markdown/podman-network-inspect.1.md) |
+| `docker network ls` | [`podman network ls`](./docs/source/markdown/podman-network-ls.1.md) |
+| `docker network rm` | [`podman network rm`](./docs/source/markdown/podman-network-rm.1.md) |
| `docker pause` | [`podman pause`](./docs/source/markdown/podman-pause.1.md) |
| `docker port` | [`podman port`](./docs/source/markdown/podman-port.1.md) |
| `docker ps` | [`podman ps`](./docs/source/markdown/podman-ps.1.md) |
| `docker pull` | [`podman pull`](./docs/source/markdown/podman-pull.1.md) |
| `docker push` | [`podman push`](./docs/source/markdown/podman-push.1.md) |
+| `docker rename` | [`podman rename`](./docs/source/markdown/podman-rename.1.md) |
| `docker restart` | [`podman restart`](./docs/source/markdown/podman-restart.1.md) |
| `docker rm` | [`podman rm`](./docs/source/markdown/podman-rm.1.md) |
| `docker rmi` | [`podman rmi`](./docs/source/markdown/podman-rmi.1.md) |
| `docker run` | [`podman run`](./docs/source/markdown/podman-run.1.md) |
| `docker save` | [`podman save`](./docs/source/markdown/podman-save.1.md) |
| `docker search` | [`podman search`](./docs/source/markdown/podman-search.1.md) |
+| `docker secret ` | [`podman secret`](./docs/source/markdown/podman-secret.1.md) |
+| `docker secret create` | [`podman secret create`](./docs/source/markdown/podman-secret-create.1.md) |
+| `docker secret inspect` | [`podman secret inspect`](./docs/source/markdown/podman-secret-inspect.1.md)|
+| `docker secret ls` | [`podman secret ls`](./docs/source/markdown/podman-secret-ls.1.md)|
+| `docker secret rm` | [`podman secret rm`](./docs/source/markdown/podman-secret-rm.1.md)|
+| `docker service` | [`podman service`](./docs/source/markdown/podman-service.1.md) |
| `docker start` | [`podman start`](./docs/source/markdown/podman-start.1.md) |
+| `docker stats` | [`podman stats`](./docs/source/markdown/podman-stats.1.md) |
| `docker stop` | [`podman stop`](./docs/source/markdown/podman-stop.1.md) |
-| `docker system df` | [`podman system df`](./docs/source/markdown/podman-system-df.1.md) |
-| `docker system info` | [`podman system info`](./docs/source/markdown/podman-system-info.1.md) |
-| `docker system prune` | [`podman system prune`](./docs/source/markdown/podman-system-prune.1.md) |
-| `docker system` | [`podman system`](./docs/source/markdown/podman-system.1.md) |
+| `docker system ` | [`podman system`](./docs/source/markdown/podman-system.1.md) |
+| `docker system df` | [`podman system df`](./docs/source/markdown/podman-system-df.1.md) |
+| `docker system info` | [`podman system info`](./docs/source/markdown/podman-system-info.1.md) |
+| `docker system prune` | [`podman system prune`](./docs/source/markdown/podman-system-prune.1.md)|
| `docker tag` | [`podman tag`](./docs/source/markdown/podman-tag.1.md) |
| `docker top` | [`podman top`](./docs/source/markdown/podman-top.1.md) |
| `docker unpause` | [`podman unpause`](./docs/source/markdown/podman-unpause.1.md) |
| `docker version` | [`podman version`](./docs/source/markdown/podman-version.1.md) |
+| `docker volume `| [`podman volume`](./docs/source/markdown/podman-volume.1.md) |
| `docker volume create` | [`podman volume create`](./docs/source/markdown/podman-volume-create.1.md) |
| `docker volume inspect`| [`podman volume inspect`](./docs/source/markdown/podman-volume-inspect.1.md)|
| `docker volume ls` | [`podman volume ls`](./docs/source/markdown/podman-volume-ls.1.md) |
| `docker volume prune` | [`podman volume prune`](./docs/source/markdown/podman-volume-prune.1.md) |
| `docker volume rm` | [`podman volume rm`](./docs/source/markdown/podman-volume-rm.1.md) |
-| `docker volume` | [`podman volume`](./docs/source/markdown/podman-volume.1.md) |
| `docker wait` | [`podman wait`](./docs/source/markdown/podman-wait.1.md) |
-**** Use mount to take advantage of the entire linux tool chain rather then just cp. Read [`here`](./docs/podman-cp.1.md) for more information.
-
## Behavioural differences and pitfalls
These commands behave differently from the commands in Docker:
| Command | Description |
| :--- | :--- |
-| `podman volume create` | While `docker volume create` is idempotent, `podman volume create` raises an error if the volume does already exist. See this [docker issue](https://github.com/moby/moby/issues/16068) for more information.|
+| `podman volume create` | While `docker volume create` is idempotent, `podman volume create` raises an error if the volume already exists. See this [docker issue](https://github.com/moby/moby/issues/16068) for more information.|
+| `podman run -v /tmp/noexist:/tmp ...` | While `docker run -v /tmp/noexist:/tmp` will create the non-existing directory on the host, Podman will report that the volume does not exist. The Podman team sees this as a bug in Docker.|
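A minimal sketch of the volume-create difference from a shell (the volume name is arbitrary):

```bash
# Docker: idempotent, the second create is a no-op.
docker volume create data
docker volume create data

# Podman: the second create exits non-zero with an "already exists" error.
podman volume create data
podman volume create data
```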
## Missing commands in podman
@@ -103,14 +116,17 @@ Those Docker commands currently do not have equivalents in `podman`:
| Missing command | Description|
| :--- | :--- |
-| `docker container rename` | podman does not support `container rename` - or the `rename` shorthand. We recommend using `podman rm` and `podman create` to create a container with a specific name.|
+| `docker builder` ||
+| `docker buildx` ||
+| `docker config` ||
+| `docker context` ||
| `docker container update` | podman does not support altering running containers. We recommend recreating containers with the correct arguments.|
| `docker node` ||
| `docker plugin` | podman does not support plugins. We recommend you use alternative OCI Runtimes or OCI Runtime Hooks to alter behavior of podman.|
-| `docker secret` ||
-| `docker service` ||
| `docker stack` ||
| `docker swarm` | podman does not support swarm. We support Kubernetes for orchestration using [CRI-O](https://github.com/cri-o/cri-o).|
+| `docker trust` |[`podman image trust`](./docs/source/markdown/podman-image-trust.1.md) |
+| `docker update` ||
## Missing commands in Docker
@@ -122,26 +138,62 @@ The following podman commands do not have a Docker equivalent:
* [`podman container refresh`](/docs/source/markdown/podman-container-refresh.1.md)
* [`podman container restore`](/docs/source/markdown/podman-container-restore.1.md)
* [`podman container runlabel`](/docs/source/markdown/podman-container-runlabel.1.md)
+* [`podman generate `](./docs/source/markdown/podman-generate.1.md)
* [`podman generate kube`](./docs/source/markdown/podman-generate-kube.1.md)
-* [`podman generate`](./docs/source/markdown/podman-generate.1.md)
-* [`podman healthcheck run`](/docs/source/markdown/podman-healthcheck-run.1.md)
+* [`podman generate systemd`](./docs/source/markdown/podman-generate-systemd.1.md)
+* [`podman healthcheck `](/docs/source/markdown/podman-healthcheck.1.md)
+* [`podman healthcheck run`](/docs/source/markdown/podman-healthcheck-run.1.md)
+* [`podman image diff`](./docs/source/markdown/podman-image-diff.1.md)
* [`podman image exists`](./docs/source/markdown/podman-image-exists.1.md)
+* [`podman image mount`](./docs/source/markdown/podman-image-mount.1.md)
+* [`podman image prune`](./docs/source/markdown/podman-image-prune.1.md)
* [`podman image sign`](./docs/source/markdown/podman-image-sign.1.md)
+* [`podman image tree`](./docs/source/markdown/podman-image-tree.1.md)
* [`podman image trust`](./docs/source/markdown/podman-image-trust.1.md)
+* [`podman image unmount`](./docs/source/markdown/podman-image-unmount.1.md)
+* [`podman init`](./docs/source/markdown/podman-init.1.md)
+* [`podman machine `](./docs/source/markdown/podman-machine.1.md)
+* [`podman machine init`](./docs/source/markdown/podman-machine-init.1.md)
+* [`podman machine list`](./docs/source/markdown/podman-machine-list.1.md)
+* [`podman machine rm`](./docs/source/markdown/podman-machine-rm.1.md)
+* [`podman machine ssh`](./docs/source/markdown/podman-machine-ssh.1.md)
+* [`podman machine start`](./docs/source/markdown/podman-machine-start.1.md)
+* [`podman machine stop`](./docs/source/markdown/podman-machine-stop.1.md)
+* [`podman manifest add`](./docs/source/markdown/podman-manifest-add.1.md)
+* [`podman manifest exists`](./docs/source/markdown/podman-manifest-exists.1.md)
+* [`podman manifest remove`](./docs/source/markdown/podman-manifest-remove.1.md)
* [`podman mount`](./docs/source/markdown/podman-mount.1.md)
+* [`podman network exists`](./docs/source/markdown/podman-network-exists.1.md)
+* [`podman network prune`](./docs/source/markdown/podman-network-prune.1.md)
+* [`podman network reload`](./docs/source/markdown/podman-network-reload.1.md)
+* [`podman play `](./docs/source/markdown/podman-play.1.md)
* [`podman play kube`](./docs/source/markdown/podman-play-kube.1.md)
-* [`podman play`](./docs/source/markdown/podman-play.1.md)
+* [`podman pod `](./docs/source/markdown/podman-pod.1.md)
* [`podman pod create`](./docs/source/markdown/podman-pod-create.1.md)
* [`podman pod exists`](./docs/source/markdown/podman-pod-exists.1.md)
* [`podman pod inspect`](./docs/source/markdown/podman-pod-inspect.1.md)
* [`podman pod kill`](./docs/source/markdown/podman-pod-kill.1.md)
* [`podman pod pause`](./docs/source/markdown/podman-pod-pause.1.md)
+* [`podman pod prune`](./docs/source/markdown/podman-pod-prune.1.md)
* [`podman pod ps`](./docs/source/markdown/podman-pod-ps.1.md)
* [`podman pod restart`](./docs/source/markdown/podman-pod-restart.1.md)
* [`podman pod rm`](./docs/source/markdown/podman-pod-rm.1.md)
* [`podman pod start`](./docs/source/markdown/podman-pod-start.1.md)
+* [`podman pod stats`](./docs/source/markdown/podman-pod-stats.1.md)
* [`podman pod stop`](./docs/source/markdown/podman-pod-stop.1.md)
* [`podman pod top`](./docs/source/markdown/podman-pod-top.1.md)
* [`podman pod unpause`](./docs/source/markdown/podman-pod-unpause.1.md)
-* [`podman pod`](./docs/source/markdown/podman-pod.1.md)
+* [`podman system connection `](./docs/source/markdown/podman-system-connection.1.md)
+* [`podman system connection add`](./docs/source/markdown/podman-system-connection-add.1.md)
+* [`podman system connection default`](./docs/source/markdown/podman-system-connection-default.1.md)
+* [`podman system connection list`](./docs/source/markdown/podman-system-connection-list.1.md)
+* [`podman system connection remove`](./docs/source/markdown/podman-system-connection-remove.1.md)
+* [`podman system connection rename`](./docs/source/markdown/podman-system-connection-rename.1.md)
+* [`podman system migrate`](./docs/source/markdown/podman-system-migrate.1.md)
+* [`podman system renumber`](./docs/source/markdown/podman-system-renumber.1.md)
+* [`podman system reset`](./docs/source/markdown/podman-system-reset.1.md)
+* [`podman system service`](./docs/source/markdown/podman-system-service.1.md)
* [`podman umount`](./docs/source/markdown/podman-umount.1.md)
+* [`podman unshare`](./docs/source/markdown/podman-unshare.1.md)
+* [`podman untag`](./docs/source/markdown/podman-untag.1.md)
+* [`podman volume exists`](./docs/source/markdown/podman-volume-exists.1.md)
diff --git a/troubleshooting.md b/troubleshooting.md
index e320f20e7..ab9fffeb3 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -697,3 +697,32 @@ limits.
This can happen when running a container from an image for another architecture than the one you are running on.
For example, if a remote repository only has, and thus sends you, a `linux/arm64` _OS/ARCH_ but you run on `linux/amd64` (as happened in https://github.com/openMF/community-app/issues/3323 due to https://github.com/timbru31/docker-ruby-node/issues/564).
+
+### 27) `Error: failed to create sshClient: Connection to bastion host (ssh://user@host:22/run/user/.../podman/podman.sock) failed.: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain`
+
+In some situations where the client is not on the same machine as the Podman daemon, the client key could be using a cipher not supported by the host. This indicates an issue with the SSH configuration. Until remedied, using Podman over SSH
+with a pre-shared key will be impossible.
+
+#### Symptom
+
+The accepted ciphers listed in `/etc/crypto-policies/back-ends/openssh.config` do not include the one that was used to create the public/private key pair that was transferred over to the host for SSH authentication.
+
+You can confirm this is the case by attempting to connect to the host via `podman-remote info` from the client while running `journalctl -f` on the host and watching for the error `userauth_pubkey: key type ssh-rsa not in PubkeyAcceptedAlgorithms [preauth]`.
+
+#### Solution
+
+Create a new key using a supported algorithm, e.g. ecdsa:
+
+`ssh-keygen -t ecdsa -f ~/.ssh/podman`
+
+Then copy the new public key over:
+
+`ssh-copy-id -i ~/.ssh/podman.pub user@host`
+
+And then re-add the connection (removing the old one if necessary):
+
+`podman-remote system connection add myuser --identity ~/.ssh/podman ssh://user@host/run/user/1000/podman/podman.sock`
+
+And now this should work:
+
+`podman-remote info`
diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore
index 5c204d28b..5c204d28b 100644
--- a/vendor/github.com/willf/bitset/.gitignore
+++ b/vendor/github.com/bits-and-blooms/bitset/.gitignore
diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml
index 094aa5ce0..094aa5ce0 100644
--- a/vendor/github.com/willf/bitset/.travis.yml
+++ b/vendor/github.com/bits-and-blooms/bitset/.travis.yml
diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE
index 59cab8a93..59cab8a93 100644
--- a/vendor/github.com/willf/bitset/LICENSE
+++ b/vendor/github.com/bits-and-blooms/bitset/LICENSE
diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md
index 50338e71d..97e83071e 100644
--- a/vendor/github.com/willf/bitset/README.md
+++ b/vendor/github.com/bits-and-blooms/bitset/README.md
@@ -2,10 +2,9 @@
*Go language library to map between non-negative integers and boolean values*
-[![Test](https://github.com/willf/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest)
-[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master)
+[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/bits-and-blooms/bitset/actions?query=workflow%3ATest)
[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/willf/bitset?tab=doc)](https://pkg.go.dev/github.com/willf/bitset?tab=doc)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
## Description
@@ -30,7 +29,7 @@ import (
"fmt"
"math/rand"
- "github.com/willf/bitset"
+ "github.com/bits-and-blooms/bitset"
)
func main() {
@@ -63,7 +62,7 @@ func main() {
As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
-Package documentation is at: https://pkg.go.dev/github.com/willf/bitset?tab=doc
+Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
## Memory Usage
@@ -78,7 +77,7 @@ It is possible that a later version will match the `math/bits` return signature
## Installation
```bash
-go get github.com/willf/bitset
+go get github.com/bits-and-blooms/bitset
```
## Contributing
diff --git a/vendor/github.com/willf/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml
index f9b295918..f9b295918 100644
--- a/vendor/github.com/willf/bitset/azure-pipelines.yml
+++ b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml
diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go
index 21e889da2..d688806a5 100644
--- a/vendor/github.com/willf/bitset/bitset.go
+++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go
@@ -209,6 +209,27 @@ func (b *BitSet) Flip(i uint) *BitSet {
return b
}
+// FlipRange flips each bit in the half-open range [start, end).
+// If end >= Cap(), this function will panic.
+// Warning: using a very large value for 'end'
+// may lead to a memory shortage and a panic: the caller is responsible
+// for providing sensible parameters in line with their memory capacity.
+func (b *BitSet) FlipRange(start, end uint) *BitSet {
+ if start >= end {
+ return b
+ }
+
+ b.extendSetMaybe(end - 1)
+ var startWord uint = start >> log2WordSize
+ var endWord uint = end >> log2WordSize
+ b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
+ for i := startWord; i < endWord; i++ {
+ b.set[i] = ^b.set[i]
+ }
+ b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
+ return b
+}
+
// Shrink shrinks BitSet so that the provided value is the last possible
// set value. It clears all bits > the provided index and reduces the size
// and length of the set.
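A minimal sketch of the new `FlipRange` helper, assuming the renamed `bits-and-blooms/bitset` import path:

```go
package main

import (
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	b := bitset.New(16)
	b.Set(2)

	// Flip every bit in the half-open range [0, 8):
	// bit 2 turns off, bits 0, 1 and 3..7 turn on.
	b.FlipRange(0, 8)

	fmt.Println(b.Count()) // 7
	fmt.Println(b.Test(2)) // false
	fmt.Println(b.Test(7)) // true
}
```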
@@ -519,7 +540,7 @@ func (b *BitSet) Copy(c *BitSet) (count uint) {
}
// Count (number of set bits).
-// Also known as "popcount" or "popularity count".
+// Also known as "popcount" or "population count".
func (b *BitSet) Count() uint {
if b != nil && b.set != nil {
return uint(popcntSlice(b.set))
diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod
new file mode 100644
index 000000000..c43e4522b
--- /dev/null
+++ b/vendor/github.com/bits-and-blooms/bitset/go.mod
@@ -0,0 +1,3 @@
+module github.com/bits-and-blooms/bitset
+
+go 1.14
diff --git a/vendor/github.com/willf/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum
index e69de29bb..e69de29bb 100644
--- a/vendor/github.com/willf/bitset/go.sum
+++ b/vendor/github.com/bits-and-blooms/bitset/go.sum
diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go
index 76577a838..76577a838 100644
--- a/vendor/github.com/willf/bitset/popcnt.go
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt.go
diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
index fc8ff4f36..fc8ff4f36 100644
--- a/vendor/github.com/willf/bitset/popcnt_19.go
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go
index 4cf64f24a..4cf64f24a 100644
--- a/vendor/github.com/willf/bitset/popcnt_amd64.go
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go
diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s
index 666c0dcc1..666c0dcc1 100644
--- a/vendor/github.com/willf/bitset/popcnt_amd64.s
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s
diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go
index 21e0ff7b4..21e0ff7b4 100644
--- a/vendor/github.com/willf/bitset/popcnt_generic.go
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go
diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go
index c52b61be9..c52b61be9 100644
--- a/vendor/github.com/willf/bitset/trailing_zeros_18.go
+++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go
diff --git a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go
index 36a988e71..36a988e71 100644
--- a/vendor/github.com/willf/bitset/trailing_zeros_19.go
+++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 1f76d4ae5..de0b4b2c5 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -658,25 +658,6 @@ func (i *Image) Unmount(force bool) error {
return err
}
-// MountPoint returns the fully-evaluated mount point of the image. If the
-// image isn't mounted, an empty string is returned.
-func (i *Image) MountPoint() (string, error) {
- counter, err := i.runtime.store.Mounted(i.TopLayer())
- if err != nil {
- return "", err
- }
-
- if counter == 0 {
- return "", nil
- }
-
- layer, err := i.runtime.store.Layer(i.TopLayer())
- if err != nil {
- return "", err
- }
- return filepath.EvalSymlinks(layer.MountPoint)
-}
-
// Size computes the size of the image layers and associated data.
func (i *Image) Size() (int64, error) {
return i.runtime.store.ImageSize(i.ID())
diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go
index 03d2456de..bfea807c8 100644
--- a/vendor/github.com/containers/common/libimage/normalize.go
+++ b/vendor/github.com/containers/common/libimage/normalize.go
@@ -5,6 +5,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// NormalizeName normalizes the provided name according to the conventions by
@@ -40,6 +41,11 @@ func NormalizeName(name string) (reference.Named, error) {
}
if _, hasTag := named.(reference.NamedTagged); hasTag {
+ // Strip off the tag of a tagged and digested reference.
+ named, err = normalizeTaggedDigestedNamed(named)
+ if err != nil {
+ return nil, err
+ }
return named, nil
}
if _, hasDigest := named.(reference.Digested); hasDigest {
@@ -90,3 +96,48 @@ func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
}
return pairs, nil
}
+
+// normalizeTaggedDigestedString strips the tag off the specified string iff it
+// is tagged and digested. Note that the tag is entirely ignored to match
+// Docker behavior.
+func normalizeTaggedDigestedString(s string) (string, error) {
+ // Note that the input string is not guaranteed to be parseable; parse
+ // errors are returned to the caller.
+ ref, err := reference.Parse(s)
+ if err != nil {
+ return "", err
+ }
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return s, nil
+ }
+ named, err = normalizeTaggedDigestedNamed(named)
+ if err != nil {
+ return "", err
+ }
+ return named.String(), nil
+}
+
+// normalizeTaggedDigestedNamed strips the tag off the specified named
+// reference iff it is tagged and digested. Note that the tag is entirely
+// ignored to match Docker behavior.
+func normalizeTaggedDigestedNamed(named reference.Named) (reference.Named, error) {
+ _, isTagged := named.(reference.NamedTagged)
+ if !isTagged {
+ return named, nil
+ }
+ digested, isDigested := named.(reference.Digested)
+ if !isDigested {
+ return named, nil
+ }
+
+ // Now strip off the tag.
+ newNamed := reference.TrimNamed(named)
+ // And re-add the digest.
+ newNamed, err := reference.WithDigest(newNamed, digested.Digest())
+ if err != nil {
+ return named, err
+ }
+ logrus.Debugf("Stripped off tag from tagged and digested reference %q", named.String())
+ return newNamed, nil
+}
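
A minimal sketch (not part of the patch) of the same stripping behavior, using the containers/image reference package directly; the image name and digest below are placeholders:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// Placeholder digest; any 64-hex-character sha256 value behaves the same.
	named, err := reference.ParseNormalizedNamed(
		"docker.io/library/fedora:latest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
	if err != nil {
		panic(err)
	}
	digested := named.(reference.Digested)
	// Drop the tag (Docker ignores it when a digest is present) and re-attach the digest.
	stripped, err := reference.WithDigest(reference.TrimNamed(named), digested.Digest())
	if err != nil {
		panic(err)
	}
	fmt.Println(stripped.String()) // docker.io/library/fedora@sha256:0123...
}
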
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index d93715279..acf5c818f 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -52,6 +52,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
options = &PullOptions{}
}
+ var possiblyUnqualifiedName string // used for short-name resolution
ref, err := alltransports.ParseImageName(name)
if err != nil {
// If the image clearly refers to a local one, we can look it up directly.
@@ -67,6 +68,15 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
return []*Image{local}, err
}
+ // Docker compat: strip off the tag iff name is tagged and digested
+ // (e.g., fedora:latest@sha256...). In that case, the tag is stripped
+ // off and entirely ignored. The digest is the sole source of truth.
+ normalizedName, normalizeError := normalizeTaggedDigestedString(name)
+ if normalizeError != nil {
+ return nil, normalizeError
+ }
+ name = normalizedName
+
// If the input does not include a transport assume it refers
// to a registry.
dockerRef, dockerErr := alltransports.ParseImageName("docker://" + name)
@@ -74,6 +84,17 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
return nil, err
}
ref = dockerRef
+ possiblyUnqualifiedName = name
+ } else if ref.Transport().Name() == registryTransport.Transport.Name() {
+ // Normalize the input if we're referring to the docker
+ // transport directly. That makes sure that a `docker://fedora`
+ // will resolve directly to `docker.io/library/fedora:latest`
+ // and not be subject to short-name resolution.
+ named := ref.DockerReference()
+ if named == nil {
+ return nil, errors.New("internal error: unexpected nil reference")
+ }
+ possiblyUnqualifiedName = named.String()
}
if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() {
@@ -94,7 +115,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
// DOCKER REGISTRY
case registryTransport.Transport.Name():
- pulledImages, pullError = r.copyFromRegistry(ctx, ref, strings.TrimPrefix(name, "docker://"), pullPolicy, options)
+ pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options)
// DOCKER ARCHIVE
case dockerArchiveTransport.Transport.Name():
@@ -279,6 +300,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options)
}
+ // Copy all tags
named := reference.TrimNamed(ref.DockerReference())
tags, err := registryTransport.GetRepositoryTags(ctx, &r.systemContext, ref)
if err != nil {
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 422cd6069..bbf1c2a61 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -180,6 +180,15 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
}
logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
return r.storageToImage(img, storageRef), "", nil
+ } else {
+ // Docker compat: strip off the tag iff name is tagged and digested
+ // (e.g., fedora:latest@sha256...). In that case, the tag is stripped
+ // off and entirely ignored. The digest is the sole source of truth.
+ normalizedName, err := normalizeTaggedDigestedString(name)
+ if err != nil {
+ return nil, "", err
+ }
+ name = normalizedName
}
originalName := name
@@ -516,8 +525,9 @@ type RemoveImagesOptions struct {
WithSize bool
}
-// RemoveImages removes images specified by names. All images are expected to
-// exist in the local containers storage.
+// RemoveImages removes images specified by names. If no names are specified,
+// remove images as specified via the options' filters. All images are
+// expected to exist in the local containers storage.
//
// If an image has more than one name, the image will be untagged with
// the specified name. RemoveImages returns a slice of untagged and removed
@@ -557,6 +567,9 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem
// orderedIDs and the deleteMap.
switch {
case len(names) > 0:
+ // Look up the images one-by-one. That allows for removing
+ // images that have been looked up successfully while reporting
+ // lookup errors at the end.
lookupOptions := LookupImageOptions{IgnorePlatform: true}
for _, name := range names {
img, resolvedName, err := r.LookupImage(name, &lookupOptions)
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 9ac71c6c8..edd52f49d 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -150,6 +150,11 @@ type ContainersConfig struct {
// PidNS indicates how to create a pid namespace for the container
PidNS string `toml:"pidns,omitempty"`
+ // RootlessNetworking describes the "kind" of networking for rootless
+ // containers. Valid options are `slirp4netns` and `cni`. The default is
+ // `slirp4netns`.
+ RootlessNetworking string `toml:"rootless_networking,omitempty"`
+
// SeccompProfile is the seccomp.json profile path which is used as the
// default for the runtime.
SeccompProfile string `toml:"seccomp_profile,omitempty"`
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 8770ebda0..d9b379eae 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -389,6 +389,9 @@ default_sysctls = [
# `podman --remote=true` for access to the remote Podman service.
# remote = false
+# Indicates the networking to be used for rootless containers
+# rootless_networking="slirp4netns"
+
# Directory for persistent engine files (database, etc)
# By default, this will be configured relative to where the containers/storage
# stores containers
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 04aaac94d..d5a7d5b84 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -82,6 +82,10 @@ var (
"/usr/local/lib/cni",
"/opt/cni/bin",
}
+
+ // DefaultRootlessNetwork is the kind of rootless networking
+ // for containers
+ DefaultRootlessNetwork = "slirp4netns"
)
const (
@@ -186,24 +190,25 @@ func DefaultConfig() (*Config, error) {
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
},
- EnvHost: false,
- HTTPProxy: true,
- Init: false,
- InitPath: "",
- IPCNS: "private",
- LogDriver: DefaultLogDriver,
- LogSizeMax: DefaultLogSizeMax,
- NetNS: netns,
- NoHosts: false,
- PidsLimit: DefaultPidsLimit,
- PidNS: "private",
- SeccompProfile: SeccompDefaultPath,
- ShmSize: DefaultShmSize,
- TZ: "",
- Umask: "0022",
- UTSNS: "private",
- UserNS: "host",
- UserNSSize: DefaultUserNSSize,
+ EnvHost: false,
+ HTTPProxy: true,
+ Init: false,
+ InitPath: "",
+ IPCNS: "private",
+ LogDriver: defaultLogDriver(),
+ LogSizeMax: DefaultLogSizeMax,
+ NetNS: netns,
+ NoHosts: false,
+ PidsLimit: DefaultPidsLimit,
+ PidNS: "private",
+ RootlessNetworking: DefaultRootlessNetwork,
+ SeccompProfile: SeccompDefaultPath,
+ ShmSize: DefaultShmSize,
+ TZ: "",
+ Umask: "0022",
+ UTSNS: "private",
+ UserNS: "host",
+ UserNSSize: DefaultUserNSSize,
},
Network: NetworkConfig{
DefaultNetwork: "podman",
@@ -410,9 +415,6 @@ func probeConmon(conmonBinary string) error {
// NetNS returns the default network namespace
func (c *Config) NetNS() string {
- if c.Containers.NetNS == "private" && unshare.IsRootless() {
- return "slirp4netns"
- }
return c.Containers.NetNS
}
@@ -544,3 +546,9 @@ func (c *Config) LogDriver() string {
func (c *Config) MachineEnabled() bool {
return c.Engine.MachineEnabled
}
+
+// RootlessNetworking returns the "kind" of networking
+// rootless containers should use
+func (c *Config) RootlessNetworking() string {
+ return c.Containers.RootlessNetworking
+}
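
A minimal sketch (not part of the patch) of how a caller might read the new setting, assuming the existing config.Default() entry point in containers/common:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Default() merges the built-in defaults with any containers.conf overrides.
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// Prints "slirp4netns" unless rootless_networking is overridden (e.g. to "cni").
	fmt.Println("rootless networking:", cfg.RootlessNetworking())
}
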
diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go
index 5b82b1389..6e39a6ccd 100644
--- a/vendor/github.com/containers/common/pkg/config/nosystemd.go
+++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go
@@ -3,9 +3,17 @@
package config
func defaultCgroupManager() string {
- return "cgroupfs"
+ return CgroupfsCgroupsManager
}
func defaultEventsLogger() string {
return "file"
}
+
+func defaultLogDriver() string {
+ return DefaultLogDriver
+}
+
+func useSystemd() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go
index 02e5c4ac2..ed014126b 100644
--- a/vendor/github.com/containers/common/pkg/config/systemd.go
+++ b/vendor/github.com/containers/common/pkg/config/systemd.go
@@ -3,11 +3,23 @@
package config
import (
+ "io/ioutil"
+ "strings"
+ "sync"
+
"github.com/containers/common/pkg/cgroupv2"
"github.com/containers/storage/pkg/unshare"
)
+var (
+ systemdOnce sync.Once
+ usesSystemd bool
+)
+
func defaultCgroupManager() string {
+ if !useSystemd() {
+ return CgroupfsCgroupsManager
+ }
enabled, err := cgroupv2.Enabled()
if err == nil && !enabled && unshare.IsRootless() {
return CgroupfsCgroupsManager
@@ -15,6 +27,32 @@ func defaultCgroupManager() string {
return SystemdCgroupsManager
}
+
func defaultEventsLogger() string {
- return "journald"
+ if useSystemd() {
+ return "journald"
+ }
+ return "file"
+}
+
+func defaultLogDriver() string {
+ // If we decide to change the default for logdriver, it should be done here.
+ if useSystemd() {
+ return DefaultLogDriver
+ }
+
+ return DefaultLogDriver
+
+}
+
+func useSystemd() bool {
+ systemdOnce.Do(func() {
+ dat, err := ioutil.ReadFile("/proc/1/comm")
+ if err == nil {
+ val := strings.TrimSuffix(string(dat), "\n")
+ usesSystemd = (val == "systemd")
+ }
+ })
+ return usesSystemd
}
diff --git a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go
new file mode 100644
index 000000000..9b32241d6
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go
@@ -0,0 +1,222 @@
+package defaultnet
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "regexp"
+ "text/template"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// TODO: A smarter implementation would make sure cni-podman0 was unused before
+// creating the default network, and adjust the bridge name if necessary.
+const networkTemplate = `{
+ "cniVersion": "0.4.0",
+ "name": "{{{{.Name}}}}",
+ "plugins": [
+ {
+ "type": "bridge",
+ "bridge": "cni-podman0",
+ "isGateway": true,
+ "ipMasq": true,
+ "hairpinMode": true,
+ "ipam": {
+ "type": "host-local",
+ "routes": [{ "dst": "0.0.0.0/0" }],
+ "ranges": [
+ [
+ {
+ "subnet": "{{{{.Subnet}}}}",
+ "gateway": "{{{{.Gateway}}}}"
+ }
+ ]
+ ]
+ }
+ },
+{{{{- if (eq .Machine true) }}}}
+ {
+ "type": "podman-machine",
+ "capabilities": {
+ "portMappings": true
+ }
+ },
+{{{{- end}}}}
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ },
+ {
+ "type": "firewall"
+ },
+ {
+ "type": "tuning"
+ }
+ ]
+}
+`
+
+var (
+ // Borrowed from Podman, modified to remove dashes and periods.
+ nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$")
+)
+
+// Used to pass info into the template engine
+type networkInfo struct {
+ Name string
+ Subnet string
+ Gateway string
+ Machine bool
+}
+
+// The most trivial definition of a CNI network possible for our use here.
+// We need the name, and nothing else.
+type network struct {
+ Name string `json:"name"`
+}
+
+// Create makes the CNI default network, if necessary.
+// Accepts the name and subnet of the network to create (a standard template
+// will be used, with these values plugged in), the configuration directory
+// where CNI configs are stored (to verify if a named configuration already
+// exists), an exists directory (where a sentinel file will be stored, to ensure
+// the network is only made once), and an isMachine bool (to determine whether
+// the machine block will be added to the config).
+// Create first checks if a default network has already been created via the
+// presence of a sentinel file. If it does exist, it returns immediately without
+// error.
+// It next checks if a CNI network with the given name already exists. In that
+// case, it creates the sentinel file and returns without error.
+// If neither of these are true, the default network is created.
+func Create(name, subnet, configDir, existsDir string, isMachine bool) error {
+ // TODO: Should probably regex name to make sure it's valid.
+ if name == "" || subnet == "" || configDir == "" || existsDir == "" {
+ return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork")
+ }
+ if !nameRegex.MatchString(name) {
+ return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name)
+ }
+
+ sentinelFile := filepath.Join(existsDir, "defaultCNINetExists")
+
+ // Check if sentinel file exists, return immediately if it does.
+ if _, err := os.Stat(sentinelFile); err == nil {
+ return nil
+ }
+
+ // Create the sentinel file if it doesn't exist, so subsequent checks
+ // don't need to go further.
+ file, err := os.Create(sentinelFile)
+ if err != nil {
+ return err
+ }
+ file.Close()
+
+ // We may need to make the config dir.
+ if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) {
+ return errors.Wrapf(err, "error creating CNI configuration directory")
+ }
+
+ // Check all networks in the CNI conflist.
+ files, err := ioutil.ReadDir(configDir)
+ if err != nil {
+ return errors.Wrapf(err, "error reading CNI configuration directory")
+ }
+ if len(files) > 0 {
+ configPaths := make([]string, 0, len(files))
+ for _, path := range files {
+ if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" {
+ configPaths = append(configPaths, filepath.Join(configDir, path.Name()))
+ }
+ }
+ for _, config := range configPaths {
+ configName, err := getConfigName(config)
+ if err != nil {
+ logrus.Errorf("Error reading CNI configuration file: %v", err)
+ continue
+ }
+ if configName == name {
+ return nil
+ }
+ }
+ }
+
+ // We need to make the config.
+ // Get subnet and gateway.
+ _, ipNet, err := net.ParseCIDR(subnet)
+ if err != nil {
+ return errors.Wrapf(err, "default network subnet %s is invalid", subnet)
+ }
+
+ ones, bits := ipNet.Mask.Size()
+ if ones == bits {
+ return errors.Wrapf(err, "default network subnet %s is to small", subnet)
+ }
+ gateway := make(net.IP, len(ipNet.IP))
+ // copy the subnet ip to the gateway so we can modify it
+ copy(gateway, ipNet.IP)
+ // the default gateway should be the first ip in the subnet
+ gateway[len(gateway)-1]++
+
+ netInfo := new(networkInfo)
+ netInfo.Name = name
+ netInfo.Gateway = gateway.String()
+ netInfo.Subnet = ipNet.String()
+ netInfo.Machine = isMachine
+
+ templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate)
+ if err != nil {
+ return errors.Wrapf(err, "error compiling template for default network")
+ }
+ var output bytes.Buffer
+ if err := templ.Execute(&output, netInfo); err != nil {
+ return errors.Wrapf(err, "error executing template for default network")
+ }
+
+ // Next, we need to place the config on disk.
+ // Loop through possible indexes, with a limit of 100 attempts.
+ created := false
+ for i := 87; i < 187; i++ {
+ configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
+ if err != nil {
+ logrus.Infof("Attempt to create default CNI network config file failed: %v", err)
+ continue
+ }
+ defer configFile.Close()
+
+ created = true
+
+ // Success - file is open. Write our buffer to it.
+ if _, err := configFile.Write(output.Bytes()); err != nil {
+ return errors.Wrapf(err, "error writing default CNI config to file")
+ }
+ break
+ }
+ if !created {
+ return errors.Errorf("no available default network configuration file was found")
+ }
+
+ return nil
+}
+
+// Get the name of the configuration contained in a given conflist file. Accepts
+// the full path of a .conflist CNI configuration.
+func getConfigName(file string) (string, error) {
+ contents, err := ioutil.ReadFile(file)
+ if err != nil {
+ return "", err
+ }
+ config := new(network)
+ if err := json.Unmarshal(contents, config); err != nil {
+ return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file))
+ }
+ return config.Name, nil
+}
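
A minimal usage sketch of the new Create helper; the directory paths and subnet below are hypothetical examples, not values mandated by the patch:

package main

import (
	"log"

	"github.com/containers/common/pkg/defaultnet"
)

func main() {
	// Hypothetical paths; real callers pass their own configured directories.
	err := defaultnet.Create(
		"podman",         // network name (letters, numbers, underscores only)
		"10.88.0.0/16",   // subnet; the gateway becomes 10.88.0.1
		"/etc/cni/net.d", // CNI configuration directory
		"/run/podman",    // directory holding the sentinel file
		false,            // isMachine: include the podman-machine plugin block
	)
	if err != nil {
		log.Fatal(err)
	}
}
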
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index f6f6d1608..ff3bf9a32 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.38.4"
+const Version = "0.39.1-dev"
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 3492b09b4..96cd6ee1e 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.31.2
+1.32.1
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 6407e9a83..b4f773f2b 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -626,5 +626,5 @@ func (r *containerStore) ReloadIfChanged() error {
if err == nil && modified {
return r.Load()
}
- return nil
+ return err
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index d9d19a0e1..19fb3fda9 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -2446,7 +2446,9 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := mount.Unmount(mountPath); err != nil {
- return err
+ if ok, _ := Mounted(mountPath); ok {
+ return err
+ }
}
logrus.Debug("devmapper: Unmount done")
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 1d2a65966..770b431bd 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -230,6 +230,9 @@ type AdditionalLayer interface {
// Info returns arbitrary information stored along with this layer (i.e. `info` file)
Info() (io.ReadCloser, error)
+ // Blob returns a reader of the raw contents of this layer.
+ Blob() (io.ReadCloser, error)
+
// Release tells the additional layer store that we don't use this handler.
Release()
}
@@ -243,6 +246,10 @@ type AdditionalLayerStoreDriver interface {
// LookupAdditionalLayer looks up additional layer store by the specified
// digest and ref and returns an object representing that layer.
LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+
+ // LookupAdditionalLayerByID looks up additional layer store by the specified
+ // ID and returns an object representing that layer.
+ LookupAdditionalLayerByID(id string) (AdditionalLayer, error)
}
// DiffGetterDriver is the interface for layered file system drivers that
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index aed3899f8..d5d161bfd 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -721,6 +721,7 @@ func (d *Driver) Cleanup() error {
// LookupAdditionalLayer looks up additional layer store by the specified
// digest and ref and returns an object representing that layer.
// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove this comment once it's no longer experimental.
func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
l, err := d.getAdditionalLayerPath(dgst, ref)
if err != nil {
@@ -736,6 +737,25 @@ func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdri
}, nil
}
+// LookupAdditionalLayerByID looks up additional layer store by the specified
+// ID and returns an object representing that layer.
+// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove this comment once it's no longer experimental.
+func (d *Driver) LookupAdditionalLayerByID(id string) (graphdriver.AdditionalLayer, error) {
+ l, err := d.getAdditionalLayerPathByID(id)
+ if err != nil {
+ return nil, err
+ }
+ // Tell the additional layer store that we use this layer.
+ // This will increase reference counter on the store's side.
+ // This will be decreased on Release() method.
+ notifyUseAdditionalLayer(l)
+ return &additionalLayer{
+ path: l,
+ d: d,
+ }, nil
+}
+
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
@@ -1655,7 +1675,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
- if d.useNaiveDiff() || !d.isParent(id, parent) {
+ if d.options.mountProgram == "" && (d.useNaiveDiff() || !d.isParent(id, parent)) {
return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
}
@@ -1833,9 +1853,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
for _, p := range []string{
filepath.Join(target, "diff"),
filepath.Join(target, "info"),
- // TODO(ktock): We should have an API to expose the stream data of this layer
- // to enable the client to retrieve the entire contents of this
- // layer when it exports this layer.
+ filepath.Join(target, "blob"),
} {
if _, err := os.Stat(p); err != nil {
return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
@@ -1850,8 +1868,8 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
}
func (d *Driver) releaseAdditionalLayerByID(id string) {
- if al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer")); err == nil {
- notifyReleaseAdditionalLayer(string(al))
+ if al, err := d.getAdditionalLayerPathByID(id); err == nil {
+ notifyReleaseAdditionalLayer(al)
} else if !os.IsNotExist(err) {
logrus.Warnf("unexpected error on reading Additional Layer Store pointer %v", err)
}
@@ -1866,12 +1884,19 @@ type additionalLayer struct {
// Info returns arbitrary information stored along with this layer (i.e. `info` file).
// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove this comment once it's no longer experimental.
func (al *additionalLayer) Info() (io.ReadCloser, error) {
return os.Open(filepath.Join(al.path, "info"))
}
+// Blob returns a reader of the raw contents of this layer.
+func (al *additionalLayer) Blob() (io.ReadCloser, error) {
+ return os.Open(filepath.Join(al.path, "blob"))
+}
+
// CreateAs creates a new layer from this additional layer.
// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove this comment once it's no longer experimental.
func (al *additionalLayer) CreateAs(id, parent string) error {
// TODO: support opts
if err := al.d.Create(id, parent, nil); err != nil {
@@ -1891,8 +1916,17 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
return os.Symlink(filepath.Join(al.path, "diff"), diffDir)
}
+func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
+ al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
+ if err != nil {
+ return "", err
+ }
+ return string(al), nil
+}
+
// Release tells the additional layer store that we don't use this handler.
// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove this comment once it's no longer experimental.
func (al *additionalLayer) Release() {
// Tell the additional layer store that we don't use this layer handler.
// This will decrease the reference counter on the store's side, which was
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 3d720cde2..e7ca56e64 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -10,7 +10,7 @@ require (
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.12.2
+ github.com/klauspost/compress v1.12.3
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.11
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 91403a201..5373d0597 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -383,8 +383,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
-github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index c7b968e05..bca25a65b 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -810,5 +810,5 @@ func (r *imageStore) ReloadIfChanged() error {
if err == nil && modified {
return r.Load()
}
- return nil
+ return err
}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 394c00731..9b05474bb 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1156,48 +1156,51 @@ func (r *layerStore) deleteInternal(id string) error {
}
id = layer.ID
err := r.driver.Remove(id)
- if err == nil {
- os.Remove(r.tspath(id))
- os.RemoveAll(r.datadir(id))
- delete(r.byid, id)
- for _, name := range layer.Names {
- delete(r.byname, name)
+ if err != nil {
+ return err
+ }
+
+ os.Remove(r.tspath(id))
+ os.RemoveAll(r.datadir(id))
+ delete(r.byid, id)
+ for _, name := range layer.Names {
+ delete(r.byname, name)
+ }
+ r.idindex.Delete(id)
+ mountLabel := layer.MountLabel
+ if layer.MountPoint != "" {
+ delete(r.bymount, layer.MountPoint)
+ }
+ r.deleteInDigestMap(id)
+ toDeleteIndex := -1
+ for i, candidate := range r.layers {
+ if candidate.ID == id {
+ toDeleteIndex = i
+ break
}
- r.idindex.Delete(id)
- mountLabel := layer.MountLabel
- if layer.MountPoint != "" {
- delete(r.bymount, layer.MountPoint)
+ }
+ if toDeleteIndex != -1 {
+ // delete the layer at toDeleteIndex
+ if toDeleteIndex == len(r.layers)-1 {
+ r.layers = r.layers[:len(r.layers)-1]
+ } else {
+ r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
}
- r.deleteInDigestMap(id)
- toDeleteIndex := -1
- for i, candidate := range r.layers {
- if candidate.ID == id {
- toDeleteIndex = i
+ }
+ if mountLabel != "" {
+ var found bool
+ for _, candidate := range r.layers {
+ if candidate.MountLabel == mountLabel {
+ found = true
break
}
}
- if toDeleteIndex != -1 {
- // delete the layer at toDeleteIndex
- if toDeleteIndex == len(r.layers)-1 {
- r.layers = r.layers[:len(r.layers)-1]
- } else {
- r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
- }
- }
- if mountLabel != "" {
- var found bool
- for _, candidate := range r.layers {
- if candidate.MountLabel == mountLabel {
- found = true
- break
- }
- }
- if !found {
- label.ReleaseLabel(mountLabel)
- }
+ if !found {
+ label.ReleaseLabel(mountLabel)
}
}
- return err
+
+ return nil
}
func (r *layerStore) deleteInDigestMap(id string) {
@@ -1401,6 +1404,52 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
return maybeCompressReadCloser(diff)
}
+ if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
+ if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
+ // This is an additional layer. We use the blob API to acquire the reproduced raw blob.
+ info, err := aLayer.Info()
+ if err != nil {
+ aLayer.Release()
+ return nil, err
+ }
+ defer info.Close()
+ layer := &Layer{}
+ if err := json.NewDecoder(info).Decode(layer); err != nil {
+ aLayer.Release()
+ return nil, err
+ }
+ blob, err := aLayer.Blob()
+ if err != nil {
+ aLayer.Release()
+ return nil, err
+ }
+ // If layer compression type is different from the expected one, decompress and convert it.
+ if compression != layer.CompressionType {
+ diff, err := archive.DecompressStream(blob)
+ if err != nil {
+ if err2 := blob.Close(); err2 != nil {
+ err = errors.Wrapf(err, "failed to close blob file: %v", err2)
+ }
+ aLayer.Release()
+ return nil, err
+ }
+ rc, err := maybeCompressReadCloser(diff)
+ if err != nil {
+ if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
+ err = errors.Wrapf(err, "failed to cleanup: %v", err2)
+ }
+ aLayer.Release()
+ return nil, err
+ }
+ return ioutils.NewReadCloserWrapper(rc, func() error {
+ defer aLayer.Release()
+ return closeAll(blob.Close, rc.Close)
+ }), nil
+ }
+ return ioutils.NewReadCloserWrapper(blob, func() error { defer aLayer.Release(); return blob.Close() }), nil
+ }
+ }
+
tsfile, err := os.Open(r.tspath(to))
if err != nil {
if !os.IsNotExist(err) {
@@ -1731,5 +1780,18 @@ func (r *layerStore) ReloadIfChanged() error {
if err == nil && modified {
return r.Load()
}
- return nil
+ return err
+}
+
+func closeAll(closes ...func() error) (rErr error) {
+ for _, f := range closes {
+ if err := f(); err != nil {
+ if rErr == nil {
+ rErr = errors.Wrapf(err, "close error")
+ continue
+ }
+ rErr = errors.Wrapf(rErr, "%v", err)
+ }
+ }
+ return
}
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 759407c63..d6d547c64 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -788,6 +788,15 @@ func (s *store) load() error {
}
s.containerStore = rcs
+ for _, store := range driver.AdditionalImageStores() {
+ gipath := filepath.Join(store, driverPrefix+"images")
+ ris, err := newROImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.roImageStores = append(s.roImageStores, ris)
+ }
+
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
return err
@@ -910,22 +919,10 @@ func (s *store) ImageStore() (ImageStore, error) {
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ROImageStores() ([]ROImageStore, error) {
- if len(s.roImageStores) != 0 {
- return s.roImageStores, nil
- }
- driver, err := s.getGraphDriver()
- if err != nil {
- return nil, err
- }
- driverPrefix := s.graphDriverName + "-"
- for _, store := range driver.AdditionalImageStores() {
- gipath := filepath.Join(store, driverPrefix+"images")
- ris, err := newROImageStore(gipath)
- if err != nil {
- return nil, err
- }
- s.roImageStores = append(s.roImageStores, ris)
+ if s.imageStore == nil {
+ return nil, ErrLoadError
}
+
return s.roImageStores, nil
}
@@ -2655,8 +2652,13 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
return "", err
}
+ modified, err := s.graphLock.Modified()
+ if err != nil {
+ return "", err
+ }
+
/* We need to make sure the home mount is present when the Mount is done. */
- if s.graphLock.TouchedSince(s.lastLoaded) {
+ if modified {
s.graphDriver = nil
s.layerStore = nil
s.graphDriver, err = s.getGraphDriver()
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
index 8d14b7869..50b83c62c 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -402,10 +402,24 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ const (
+ // Values based on linux/include/uapi/linux/capability.h
+ xattrCapsSz2 = 20
+ versionOffset = 3
+ vfsCapRevision2 = 2
+ vfsCapRevision3 = 3
+ )
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
+ length := len(capability)
+ if capability[versionOffset] == vfsCapRevision3 {
+ // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
+ // sense outside the user namespace the archive is built in.
+ capability[versionOffset] = vfsCapRevision2
+ length = xattrCapsSz2
+ }
hdr.Xattrs = make(map[string]string)
- hdr.Xattrs["security.capability"] = string(capability)
+ hdr.Xattrs["security.capability"] = string(capability[:length])
}
return nil
}
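
A standalone sketch of the same conversion, with a hypothetical helper name (downgradeCapXattr) and no dependency on the archive package; the constants mirror the ones introduced above:

package main

import "fmt"

const (
	xattrCapsSz2    = 20 // size of a VFS_CAP_REVISION_2 payload
	versionOffset   = 3  // version byte within the little-endian magic word
	vfsCapRevision2 = 2
	vfsCapRevision3 = 3
)

// downgradeCapXattr rewrites the version byte of a revision-3
// security.capability blob and drops the trailing rootid so the result fits
// the 20-byte revision-2 layout, mirroring the hunk above.
func downgradeCapXattr(capability []byte) []byte {
	if len(capability) > versionOffset && capability[versionOffset] == vfsCapRevision3 {
		capability[versionOffset] = vfsCapRevision2
		return capability[:xattrCapsSz2]
	}
	return capability
}

func main() {
	blob := make([]byte, 24) // revision-3 payload: 20 bytes plus a 4-byte rootid
	blob[versionOffset] = vfsCapRevision3
	out := downgradeCapXattr(blob)
	fmt.Println(len(out), out[versionOffset]) // 20 2
}
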
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 40b5802de..5283ac5a5 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -644,7 +644,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.fill = (*compressor).fillBlock
d.step = (*compressor).store
case level == ConstantCompression:
- d.w.logNewTablePenalty = 8
+ d.w.logNewTablePenalty = 10
d.window = make([]byte, 32<<10)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeHuff
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 678f08105..347ac2c90 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -45,7 +45,7 @@ const (
bTableBits = 17 // Bits used in the big tables
bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history.
+ allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index db54be139..3ad5e9807 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -6,6 +6,7 @@ package flate
import (
"encoding/binary"
+ "fmt"
"io"
)
@@ -27,7 +28,7 @@ const (
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
// we accumulate 6 bytes between writes to the buffer.
- bufferFlushSize = 240
+ bufferFlushSize = 246
// bufferSize is the actual output byte buffer size.
// It must have additional headroom for a flush
@@ -59,19 +60,31 @@ var offsetExtraBits = [64]int8{
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}
-var offsetBase = [64]uint32{
- /* normal deflate */
- 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
- 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
- 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
- 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
- 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
- 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+var offsetCombined = [32]uint32{}
- /* extended window */
- 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
- 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
- 0x100000, 0x180000, 0x200000, 0x300000,
+func init() {
+ var offsetBase = [64]uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+ 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+ 0x100000, 0x180000, 0x200000, 0x300000,
+ }
+
+ for i := range offsetCombined[:] {
+ // Don't use extended window values...
+ if offsetBase[i] > 0x006000 {
+ continue
+ }
+ offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+ }
}
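
Illustration only (not library code): each offsetCombined entry packs the extra-bit count into the upper 16 bits and the offset base into the lower 16 bits, which the writer later unpacks with >>16 and &0xffff:

package main

import "fmt"

// packOffsetEntry mimics the table layout built in init() above.
func packOffsetEntry(extraBits uint16, base uint32) uint32 {
	return uint32(extraBits)<<16 | base
}

func main() {
	entry := packOffsetEntry(7, 0x0180) // hypothetical code: 7 extra bits, base 384
	fmt.Println("extra bits:", entry>>16) // 7
	fmt.Println("base:", entry&0xffff)    // 384
}
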
// The odd order in which the codegen code sizes are written.
@@ -88,15 +101,16 @@ type huffmanBitWriter struct {
bits uint64
nbits uint16
nbytes uint8
+ lastHuffMan bool
literalEncoding *huffmanEncoder
+ tmpLitEncoding *huffmanEncoder
offsetEncoding *huffmanEncoder
codegenEncoding *huffmanEncoder
err error
lastHeader int
// Set between 0 (reused block can be up to 2x the size)
logNewTablePenalty uint
- lastHuffMan bool
- bytes [256]byte
+ bytes [256 + 8]byte
literalFreq [lengthCodesStart + 32]uint16
offsetFreq [32]uint16
codegenFreq [codegenCodeCount]uint16
@@ -128,6 +142,7 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
return &huffmanBitWriter{
writer: w,
literalEncoding: newHuffmanEncoder(literalCount),
+ tmpLitEncoding: newHuffmanEncoder(literalCount),
codegenEncoding: newHuffmanEncoder(codegenCodeCount),
offsetEncoding: newHuffmanEncoder(offsetCodeCount),
}
@@ -745,9 +760,31 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
offs := oeCodes[:32]
lengths := leCodes[lengthCodesStart:]
lengths = lengths[:32]
+
+ // Go 1.16 LOVES having these on stack.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
for _, t := range tokens {
if t < matchType {
- w.writeCode(lits[t.literal()])
+ //w.writeCode(lits[t.literal()])
+ c := lits[t.literal()]
+ bits |= uint64(c.code) << nbits
+ nbits += c.len
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
continue
}
@@ -759,38 +796,99 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode&31]
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += c.len
- if w.nbits >= 48 {
- w.writeOutBits()
+ bits |= uint64(c.code) << nbits
+ nbits += c.len
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
}
}
extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
if extraLengthBits > 0 {
+ //w.writeBits(extraLength, extraLengthBits)
extraLength := int32(length - lengthBase[lengthCode&31])
- w.writeBits(extraLength, extraLengthBits)
+ bits |= uint64(extraLength) << nbits
+ nbits += extraLengthBits
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
}
// Write the offset
offset := t.offset()
- offsetCode := offsetCode(offset)
+ offsetCode := offset >> 16
+ offset &= matchOffsetOnlyMask
if false {
w.writeCode(offs[offsetCode&31])
} else {
// inlined
- c := offs[offsetCode&31]
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += c.len
- if w.nbits >= 48 {
- w.writeOutBits()
+ c := offs[offsetCode]
+ bits |= uint64(c.code) << nbits
+ nbits += c.len
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
}
}
- extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
- if extraOffsetBits > 0 {
- extraOffset := int32(offset - offsetBase[offsetCode&63])
- w.writeBits(extraOffset, extraOffsetBits)
+ offsetComb := offsetCombined[offsetCode]
+ if offsetComb > 1<<16 {
+ //w.writeBits(extraOffset, extraOffsetBits)
+ bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+ nbits += uint16(offsetComb >> 16)
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
}
}
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
if deferEOB {
w.writeCode(leCodes[endBlockMarker])
}
@@ -825,13 +923,28 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
}
+ // Fill is rarely better...
+ const fill = false
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+
// Add everything as literals
// We have to estimate the header size.
// Assume header is around 70 bytes:
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
- estBits := histogramSize(input, w.literalFreq[:], !eof && !sync)
- estBits += w.lastHeader + len(input)/32
+ histogram(input, w.literalFreq[:numLiterals], fill)
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+ if fill {
+ // Clear fill...
+ for i := range w.literalFreq[:numLiterals] {
+ w.literalFreq[i] = 0
+ }
+ histogram(input, w.literalFreq[:numLiterals], false)
+ }
+ estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+ estBits += w.lastHeader
if w.lastHeader == 0 {
estBits += guessHeaderSizeBits
}
@@ -839,33 +952,31 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Store bytes, if we don't get a reasonable improvement.
ssize, storable := w.storedSize(input)
- if storable && ssize < estBits {
+ if storable && ssize <= estBits {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
}
- reuseSize := 0
if w.lastHeader > 0 {
- reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256])
+ reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
if estBits < reuseSize {
+ if debugDeflate {
+ //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ }
// We owe an EOB
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
+ } else if debugDeflate {
+ fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
}
}
- const numLiterals = endBlockMarker + 1
- const numOffsets = 1
+ count := 0
if w.lastHeader == 0 {
- if !eof && !sync {
- // Generate a slightly suboptimal tree that can be used for all.
- fillHist(w.literalFreq[:numLiterals])
- }
- w.literalFreq[endBlockMarker] = 1
- w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
-
+ // Use the temp encoding, so swap.
+ w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
@@ -876,34 +987,47 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
w.lastHuffMan = true
w.lastHeader, _ = w.headerSize()
+ if debugDeflate {
+ count += w.lastHeader
+ fmt.Println("header:", count/8)
+ }
}
- encoding := w.literalEncoding.codes[:257]
+ encoding := w.literalEncoding.codes[:256]
+ // Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += c.len
- if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- binary.LittleEndian.PutUint64(w.bytes[n:], bits)
- n += 6
- if n >= bufferFlushSize {
+ bits |= uint64(c.code) << nbits
+ nbits += c.len
+ if debugDeflate {
+ count += int(c.len)
+ }
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
if w.err != nil {
- n = 0
+ nbytes = 0
return
}
- w.write(w.bytes[:n])
- n = 0
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
}
- w.nbytes = n
}
}
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if debugDeflate {
+ fmt.Println("wrote", count/8, "bytes")
+ }
if eof || sync {
- w.writeCode(encoding[endBlockMarker])
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
w.lastHuffMan = false
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 0d3445a1c..67b2b3872 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -21,9 +21,13 @@ type hcode struct {
}
type huffmanEncoder struct {
- codes []hcode
- freqcache []literalNode
- bitCount [17]int32
+ codes []hcode
+ bitCount [17]int32
+
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ freqcache [literalCount + 1]literalNode
}
type literalNode struct {
@@ -132,6 +136,21 @@ func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
return total
}
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ code := h.codes[i]
+ if code.len == 0 {
+ return math.MaxInt32
+ }
+ total += int(f) * int(code.len)
+ }
+ }
+ return total
+}
+
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
@@ -291,12 +310,6 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
- if h.freqcache == nil {
- // Allocate a reusable buffer with the longest possible frequency table.
- // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
- // The largest of these is literalCount, so we allocate for that case.
- h.freqcache = make([]literalNode, literalCount+1)
- }
list := h.freqcache[:len(freq)+1]
// Number of non-zero literals
count := 0
@@ -330,10 +343,14 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
h.assignEncodingAndSize(bitCount, list)
}
+// atLeastOne clamps the result between 1 and 15.
func atLeastOne(v float32) float32 {
if v < 1 {
return 1
}
+ if v > 15 {
+ return 15
+ }
return v
}
@@ -346,31 +363,12 @@ func fillHist(b []uint16) {
}
}
-// histogramSize accumulates a histogram of b in h.
-// An estimated size in bits is returned.
-// len(h) must be >= 256, and h's elements must be all zeroes.
-func histogramSize(b []byte, h []uint16, fill bool) (bits int) {
+func histogram(b []byte, h []uint16, fill bool) {
h = h[:256]
for _, t := range b {
h[t]++
}
- total := len(b)
if fill {
- for _, v := range h {
- if v == 0 {
- total++
- }
- }
+ fillHist(h)
}
-
- invTotal := 1.0 / float32(total)
- shannon := float32(0.0)
- for _, v := range h {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- }
- }
-
- return int(shannon + 0.99)
}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index f9abf606d..eb862d7a9 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,14 +13,17 @@ import (
)
const (
+ // From top
// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
// 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
- lengthShift = 22
- offsetMask = 1<<lengthShift - 1
- typeMask = 3 << 30
- literalType = 0 << 30
- matchType = 1 << 30
+ // 5 bits offsetcode
+ // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ lengthShift = 22
+ offsetMask = 1<<lengthShift - 1
+ typeMask = 3 << 30
+ literalType = 0 << 30
+ matchType = 1 << 30
+ matchOffsetOnlyMask = 0xffff
)
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
@@ -187,7 +190,7 @@ func (t *tokens) indexTokens(in []token) {
t.AddLiteral(tok.literal())
continue
}
- t.AddMatch(uint32(tok.length()), tok.offset())
+ t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
}
}
@@ -232,7 +235,7 @@ func (t *tokens) EstimatedBits() int {
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
}
}
// Just add 15 for EOB
@@ -240,7 +243,7 @@ func (t *tokens) EstimatedBits() int {
for i, v := range t.extraHist[1 : literalCount-256] {
if v > 0 {
n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(lengthExtraBits[i&31]) * int(v)
nMatches += int(v)
}
@@ -251,7 +254,7 @@ func (t *tokens) EstimatedBits() int {
for i, v := range t.offHist[:offsetCodeCount] {
if v > 0 {
n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(offsetExtraBits[i&31]) * int(v)
}
}
@@ -270,11 +273,13 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
+ oCode := offsetCode(xoffset)
+ xoffset |= oCode << 16
t.nLits++
- lengthCode := lengthCodes1[uint8(xlength)] & 31
+
+ t.extraHist[lengthCodes1[uint8(xlength)]]++
+ t.offHist[oCode]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
- t.extraHist[lengthCode]++
- t.offHist[offsetCode(xoffset)&31]++
t.n++
}
@@ -286,7 +291,8 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
- oc := offsetCode(xoffset) & 31
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
for xlength > 0 {
xl := xlength
if xl > 258 {
@@ -294,12 +300,11 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
xl = 258 - baseMatchLength
}
xlength -= xl
- xl -= 3
+ xl -= baseMatchLength
t.nLits++
- lengthCode := lengthCodes1[uint8(xl)] & 31
- t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
- t.extraHist[lengthCode]++
+ t.extraHist[lengthCodes1[uint8(xl)]]++
t.offHist[oc]++
+ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
}
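
Illustration only (not library code): the reworked token layout carries the precomputed offset code above bit 16 of xoffset, while the low 16 bits keep the raw offset value:

package main

import "fmt"

const matchOffsetOnlyMask = 0xffff

// packXOffset mimics the "xoffset |= oCode << 16" step in AddMatch/AddMatchLong.
func packXOffset(offset, offsetCode uint32) uint32 {
	return offset | offsetCode<<16
}

func main() {
	x := packXOffset(1234, 9) // hypothetical match: offset 1234, offset code 9
	fmt.Println("offset:", x&matchOffsetOnlyMask) // 1234
	fmt.Println("code:", x>>16)                   // 9
}
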
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index e7d7eb0ae..787813fa9 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -16,8 +16,7 @@ Currently the package is heavily optimized for 64 bit processors and will be sig
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
-Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
-
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
## Compressor
@@ -405,13 +404,28 @@ BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938
This reflects the performance around May 2020, but this may be out of date.
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
+to use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to have only a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance allows some resources to be reused.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
+
# Contributions
Contributions are always welcome.
For new features/fixes, remember to add tests and for performance enhancements include benchmarks.
-For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files.
-
For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
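
The new "Zstd inside ZIP files" section above describes per-reader/per-writer registration. A minimal sketch of that flow is shown below, assuming the `zstd.ZipCompressor`/`zstd.ZipDecompressor` constructors that the README's example link refers to; see the linked pkg.go.dev example for the authoritative version.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build a zip archive whose single entry uses zstd (method 93, ZipMethodWinZip).
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello zstd inside zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back. The decompressor is registered on this reader only,
	// not via the global zip registration, to avoid clashes between packages.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}
```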
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index b51d922bd..6cea054d2 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -123,12 +123,10 @@ func newBlockDec(lowMem bool) *blockDec {
// Input must be a start of a block and will be at the end of the block when returned.
func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.WindowSize = windowSize
- tmp := br.readSmall(3)
- if tmp == nil {
- if debug {
- println("Reading block header:", io.ErrUnexpectedEOF)
- }
- return io.ErrUnexpectedEOF
+ tmp, err := br.readSmall(3)
+ if err != nil {
+ println("Reading block header:", err)
+ return err
}
bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
b.Last = bh&1 != 0
@@ -179,7 +177,6 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
if cap(b.dst) <= maxSize {
b.dst = make([]byte, 0, maxSize+1)
}
- var err error
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debug {
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 658ef7838..17e820a6a 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -12,8 +12,8 @@ import (
type byteBuffer interface {
// Read up to 8 bytes.
- // Returns nil if no more input is available.
- readSmall(n int) []byte
+ // Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+ readSmall(n int) ([]byte, error)
// Read >8 bytes.
// MAY use the destination slice.
@@ -29,17 +29,17 @@ type byteBuffer interface {
// in-memory buffer
type byteBuf []byte
-func (b *byteBuf) readSmall(n int) []byte {
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
if debugAsserts && n > 8 {
panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
}
bb := *b
if len(bb) < n {
- return nil
+ return nil, io.ErrUnexpectedEOF
}
r := bb[:n]
*b = bb[n:]
- return r
+ return r, nil
}
func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
@@ -81,19 +81,22 @@ type readerWrapper struct {
tmp [8]byte
}
-func (r *readerWrapper) readSmall(n int) []byte {
+func (r *readerWrapper) readSmall(n int) ([]byte, error) {
if debugAsserts && n > 8 {
panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
}
n2, err := io.ReadFull(r.r, r.tmp[:n])
// We only really care about the actual bytes read.
- if n2 != n {
+ if err != nil {
+ if err == io.EOF {
+ return nil, io.ErrUnexpectedEOF
+ }
if debug {
println("readSmall: got", n2, "want", n, "err", err)
}
- return nil
+ return nil, err
}
- return r.tmp[:n]
+ return r.tmp[:n], nil
}
func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 693c5f05d..4dc151213 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -80,9 +80,14 @@ func (d *frameDec) reset(br byteBuffer) error {
d.WindowSize = 0
var b []byte
for {
- b = br.readSmall(4)
- if b == nil {
+ var err error
+ b, err = br.readSmall(4)
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
return io.EOF
+ default:
+ return err
+ case nil:
}
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
if debug {
@@ -92,14 +97,14 @@ func (d *frameDec) reset(br byteBuffer) error {
break
}
// Read size to skip
- b = br.readSmall(4)
- if b == nil {
- println("Reading Frame Size EOF")
- return io.ErrUnexpectedEOF
+ b, err = br.readSmall(4)
+ if err != nil {
+ println("Reading Frame Size", err)
+ return err
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
- err := br.skipN(int(n))
+ err = br.skipN(int(n))
if err != nil {
if debug {
println("Reading discarded frame", err)
@@ -147,12 +152,11 @@ func (d *frameDec) reset(br byteBuffer) error {
if size == 3 {
size = 4
}
- b = br.readSmall(int(size))
- if b == nil {
- if debug {
- println("Reading Dictionary_ID", io.ErrUnexpectedEOF)
- }
- return io.ErrUnexpectedEOF
+
+ b, err = br.readSmall(int(size))
+ if err != nil {
+ println("Reading Dictionary_ID", err)
+ return err
}
var id uint32
switch size {
@@ -187,10 +191,10 @@ func (d *frameDec) reset(br byteBuffer) error {
}
d.FrameContentSize = 0
if fcsSize > 0 {
- b := br.readSmall(fcsSize)
- if b == nil {
- println("Reading Frame content", io.ErrUnexpectedEOF)
- return io.ErrUnexpectedEOF
+ b, err = br.readSmall(fcsSize)
+ if err != nil {
+ println("Reading Frame content", err)
+ return err
}
switch fcsSize {
case 1:
@@ -307,10 +311,10 @@ func (d *frameDec) checkCRC() error {
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
- want := d.rawInput.readSmall(4)
- if want == nil {
- println("CRC missing?")
- return io.ErrUnexpectedEOF
+ want, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ return err
}
if !bytes.Equal(tmp[:], want) {
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index e35a0a2f8..9325b928a 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -13,8 +13,9 @@ import (
// See https://www.winzip.com/win/en/comp_info.html
const ZipMethodWinZip = 93
-// ZipMethodPKWare is the method number used by PKWARE to indicate Zstandard compression.
-// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.7.TXT
+// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression.
+// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 50631e4a9..494abdbfb 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,13 @@
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit, and an attempt to change it after the fact proved problematic. 1.16.4 therefore retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
## 1.16.2
### Fixes
diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md
index 1e298c2da..db3d234c1 100644
--- a/vendor/github.com/onsi/ginkgo/RELEASING.md
+++ b/vendor/github.com/onsi/ginkgo/RELEASING.md
@@ -8,7 +8,10 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
1. Update `VERSION` in `config/config.go`
-1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
-1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
-1. Push the commit and tag to GitHub
-1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ``` \ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index ab8863d75..5f3f43969 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.16.2"
+const VERSION = "1.16.4"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 998c2c2ca..4a6e1e1ee 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -473,24 +473,28 @@ func By(text string, callbacks ...func()) {
//The body function must have the signature:
// func(b Benchmarker)
func Measure(text string, body interface{}, samples int) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
return true
}
//You can focus individual Measures using FMeasure
func FMeasure(text string, body interface{}, samples int) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
return true
}
//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 664372fb6..86a5a97be 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -9,3 +9,5 @@ require (
golang.org/x/sys v0.0.0-20210112080510-489259a85091
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e
)
+
+retract v1.16.3 // git tag accidentally associated with incorrect git commit
diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
index 71420f597..305c134b7 100644
--- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
+++ b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
@@ -46,9 +46,9 @@ func (d deprecations) Async() Deprecation {
func (d deprecations) Measure() Deprecation {
return Deprecation{
- Message: "Measure is deprecated in Ginkgo V2",
+ Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
DocLink: "removed-measure",
- Version: "1.16.0",
+ Version: "1.16.3",
}
}
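
The updated deprecation message points users at gomega/gmeasure. A rough migration sketch follows; it assumes gmeasure's `NewExperiment` and `MeasureDuration` API as introduced in Gomega 1.13.0, so the gmeasure docs should be checked for reporting integration details.

```go
package benchmarks_test

import (
	"sort"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gmeasure"
)

func TestBenchmarks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Benchmarks Suite")
}

var _ = Describe("sorting performance", func() {
	It("records how long sorting takes", func() {
		// An Experiment replaces the deprecated Measure node.
		experiment := gmeasure.NewExperiment("sort 10k ints")

		data := make([]int, 10_000)
		// Each call records one duration sample named "sort".
		experiment.MeasureDuration("sort", func() {
			sort.Ints(data)
		})
	})
})
```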
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index b05f3a935..4783c0d43 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,9 @@
+## 1.13.0
+
+### Features
+- gmeasure provides BETA support for benchmarking (#447) [8f2dfbf]
+- Set consistently and eventually defaults on init (#443) [12eb778]
+
## 1.12.0
### Features
diff --git a/vendor/github.com/onsi/gomega/env.go b/vendor/github.com/onsi/gomega/env.go
new file mode 100644
index 000000000..62fd885a9
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/env.go
@@ -0,0 +1,40 @@
+package gomega
+
+import (
+ "os"
+
+ "github.com/onsi/gomega/internal/defaults"
+)
+
+const (
+ ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
+ ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
+ EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT"
+ EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL"
+)
+
+func init() {
+ defaults.SetDurationFromEnv(
+ os.Getenv,
+ SetDefaultConsistentlyDuration,
+ ConsistentlyDurationEnvVarName,
+ )
+
+ defaults.SetDurationFromEnv(
+ os.Getenv,
+ SetDefaultConsistentlyPollingInterval,
+ ConsistentlyPollingIntervalEnvVarName,
+ )
+
+ defaults.SetDurationFromEnv(
+ os.Getenv,
+ SetDefaultEventuallyTimeout,
+ EventuallyTimeoutEnvVarName,
+ )
+
+ defaults.SetDurationFromEnv(
+ os.Getenv,
+ SetDefaultEventuallyPollingInterval,
+ EventuallyPollingIntervalEnvVarName,
+ )
+}
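
With this init hook, the four Gomega defaults can be tuned without code changes. A minimal sketch of the two approaches, assuming the environment variable is honored exactly as the constants above suggest; the programmatic setter already exists in gomega_dsl.go.

```go
package main

import (
	"time"

	"github.com/onsi/gomega"
)

func main() {
	// Running the tests with
	//   GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=30s go test ./...
	// is picked up by the init() above and is assumed to be equivalent
	// to the existing programmatic setter:
	gomega.SetDefaultEventuallyTimeout(30 * time.Second)
}
```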
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 9050e15e8..a05b34b27 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.12.0"
+const GOMEGA_VERSION = "1.13.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/onsi/gomega/internal/defaults/env.go b/vendor/github.com/onsi/gomega/internal/defaults/env.go
new file mode 100644
index 000000000..bc29c63d5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/defaults/env.go
@@ -0,0 +1,22 @@
+package defaults
+
+import (
+ "fmt"
+ "time"
+)
+
+func SetDurationFromEnv(getDurationFromEnv func(string) string, varSetter func(time.Duration), name string) {
+ durationFromEnv := getDurationFromEnv(name)
+
+ if len(durationFromEnv) == 0 {
+ return
+ }
+
+ duration, err := time.ParseDuration(durationFromEnv)
+
+ if err != nil {
+ panic(fmt.Sprintf("Expected a duration when using %s! Parse error %v", name, err))
+ }
+
+ varSetter(duration)
+}
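
Because `SetDurationFromEnv` takes both the getenv function and the setter as parameters, its behaviour can be exercised in isolation. The sketch below mirrors the implementation shown above (copied locally only because the real function lives in an internal package); the stub names are made up for illustration.

```go
package main

import (
	"fmt"
	"time"
)

// setDurationFromEnv mirrors the vendored defaults.SetDurationFromEnv above:
// an empty value is ignored, a valid duration is passed to the setter, and an
// unparseable value panics.
func setDurationFromEnv(getenv func(string) string, set func(time.Duration), name string) {
	v := getenv(name)
	if len(v) == 0 {
		return
	}
	d, err := time.ParseDuration(v)
	if err != nil {
		panic(fmt.Sprintf("Expected a duration when using %s! Parse error %v", name, err))
	}
	set(d)
}

func main() {
	var timeout time.Duration
	getenv := func(string) string { return "45s" } // stub for os.Getenv
	setDurationFromEnv(getenv, func(d time.Duration) { timeout = d }, "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT")
	fmt.Println(timeout) // prints 45s
}
```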
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 54597398b..a91a116f8 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -16,9 +16,9 @@ import (
"strings"
"sync"
+ "github.com/bits-and-blooms/bitset"
"github.com/opencontainers/selinux/pkg/pwalk"
"github.com/pkg/errors"
- "github.com/willf/bitset"
"golang.org/x/sys/unix"
)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
new file mode 100644
index 000000000..54cd3b086
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
index aa9857bb8..a0df50779 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
@@ -1,19 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
-import (
+import(
"bytes"
+ "context"
"fmt"
+ "time"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
@@ -21,3 +25,4 @@ var _ = zipkincore.GoUnusedProtection__
func init() {
}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
index e48811c50..6472e84e6 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -1,411 +1,396 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
-import (
+import(
"bytes"
+ "context"
"fmt"
+ "time"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
-
type Agent interface {
- // Parameters:
- // - Spans
- EmitZipkinBatch(spans []*zipkincore.Span) (err error)
- // Parameters:
- // - Batch
- EmitBatch(batch *jaeger.Batch) (err error)
+ // Parameters:
+ // - Spans
+ EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
+ // Parameters:
+ // - Batch
+ EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
}
type AgentClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
+ c thrift.TClient
+ meta thrift.ResponseMeta
}
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
- return &AgentClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
+ return &AgentClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
}
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
- return &AgentClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
+ return &AgentClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewAgentClient(c thrift.TClient) *AgentClient {
+ return &AgentClient{
+ c: c,
+ }
+}
+
+func (p *AgentClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
}
// Parameters:
// - Spans
-func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
- if err = p.sendEmitZipkinBatch(spans); err != nil {
- return
- }
- return
-}
-
-func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
- return
- }
- args := AgentEmitZipkinBatchArgs{
- Spans: spans,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
+func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
+ var _args0 AgentEmitZipkinBatchArgs
+ _args0.Spans = spans
+ p.SetLastResponseMeta_(thrift.ResponseMeta{})
+ if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
+ return err
+ }
+ return nil
}
// Parameters:
// - Batch
-func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
- if err = p.sendEmitBatch(batch); err != nil {
- return
- }
- return
-}
-
-func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
- return
- }
- args := AgentEmitBatchArgs{
- Batch: batch,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
+func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
+ var _args1 AgentEmitBatchArgs
+ _args1.Batch = batch
+ p.SetLastResponseMeta_(thrift.ResponseMeta{})
+ if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
+ return err
+ }
+ return nil
}
type AgentProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Agent
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
}
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
+ p.processorMap[key] = processor
}
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
+ processor, ok = p.processorMap[key]
+ return processor, ok
}
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
+ return p.processorMap
}
func NewAgentProcessor(handler Agent) *AgentProcessor {
- self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
- self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
- return self0
+ self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+ self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler}
+ self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler}
+return self2
}
-func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x1.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x1
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x3.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x3
}
type agentProcessorEmitZipkinBatch struct {
- handler Agent
+ handler Agent
}
-func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitZipkinBatchArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- return false, err
- }
+func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitZipkinBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ _ = tickerCancel
- iprot.ReadMessageEnd()
- var err2 error
- if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
- return true, err2
- }
- return true, nil
+ if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
+ tickerCancel()
+ return true, thrift.WrapTException(err2)
+ }
+ tickerCancel()
+ return true, nil
}
type agentProcessorEmitBatch struct {
- handler Agent
+ handler Agent
}
-func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitBatchArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- return false, err
- }
+func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
- iprot.ReadMessageEnd()
- var err2 error
- if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
- return true, err2
- }
- return true, nil
+ tickerCancel := func() {}
+ _ = tickerCancel
+
+ if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
+ tickerCancel()
+ return true, thrift.WrapTException(err2)
+ }
+ tickerCancel()
+ return true, nil
}
+
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Spans
type AgentEmitZipkinBatchArgs struct {
- Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
+ Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
}
func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
- return &AgentEmitZipkinBatchArgs{}
+ return &AgentEmitZipkinBatchArgs{}
}
+
func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
- return p.Spans
-}
-func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*zipkincore.Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem2 := &zipkincore.Span{}
- if err := _elem2.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
- }
- p.Spans = append(p.Spans, _elem2)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
- }
- return err
+ return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*zipkincore.Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i ++ {
+ _elem4 := &zipkincore.Span{}
+ if err := _elem4.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Spans = append(p.Spans, _elem4)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+ return err
}
func (p *AgentEmitZipkinBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
}
// Attributes:
// - Batch
type AgentEmitBatchArgs struct {
- Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
+ Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
}
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
- return &AgentEmitBatchArgs{}
+ return &AgentEmitBatchArgs{}
}
var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
-
func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
- if !p.IsSetBatch() {
- return AgentEmitBatchArgs_Batch_DEFAULT
- }
- return p.Batch
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
- return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
- p.Batch = &jaeger.Batch{}
- if err := p.Batch.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
- }
- if err := p.Batch.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
- }
- return err
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Batch = &jaeger.Batch{}
+ if err := p.Batch.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
+ if err := p.Batch.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
+ return err
}
func (p *AgentEmitBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
deleted file mode 100644
index 9c28f11c1..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package agent
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
- "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-var GoUnusedProtection__ int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
new file mode 100644
index 000000000..fe45a9f9a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
deleted file mode 100644
index b32c37dd2..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package jaeger
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type Agent interface {
- // Parameters:
- // - Batch
- EmitBatch(batch *Batch) (err error)
-}
-
-type AgentClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
-}
-
-func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
- return &AgentClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
-}
-
-func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
- return &AgentClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
-}
-
-// Parameters:
-// - Batch
-func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
- if err = p.sendEmitBatch(batch); err != nil {
- return
- }
- return
-}
-
-func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
- return
- }
- args := AgentEmitBatchArgs{
- Batch: batch,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
-}
-
-type AgentProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Agent
-}
-
-func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewAgentProcessor(handler Agent) *AgentProcessor {
-
- self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
- return self6
-}
-
-func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x7.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x7
-
-}
-
-type agentProcessorEmitBatch struct {
- handler Agent
-}
-
-func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitBatchArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- var err2 error
- if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
- return true, err2
- }
- return true, nil
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Batch
-type AgentEmitBatchArgs struct {
- Batch *Batch `thrift:"batch,1" json:"batch"`
-}
-
-func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
- return &AgentEmitBatchArgs{}
-}
-
-var AgentEmitBatchArgs_Batch_DEFAULT *Batch
-
-func (p *AgentEmitBatchArgs) GetBatch() *Batch {
- if !p.IsSetBatch() {
- return AgentEmitBatchArgs_Batch_DEFAULT
- }
- return p.Batch
-}
-func (p *AgentEmitBatchArgs) IsSetBatch() bool {
- return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
- p.Batch = &Batch{}
- if err := p.Batch.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
- }
- if err := p.Batch.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
- }
- return err
-}
-
-func (p *AgentEmitBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
index 621b8b1c2..b6ce85570 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
@@ -1,18 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package jaeger
-import (
+import(
"bytes"
+ "context"
"fmt"
+ "time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
var _ = bytes.Equal
+
func init() {
}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
new file mode 100644
index 000000000..d55cca024
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
@@ -0,0 +1,2698 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+ "bytes"
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type TagType int64
+const (
+ TagType_STRING TagType = 0
+ TagType_DOUBLE TagType = 1
+ TagType_BOOL TagType = 2
+ TagType_LONG TagType = 3
+ TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+ switch p {
+ case TagType_STRING: return "STRING"
+ case TagType_DOUBLE: return "DOUBLE"
+ case TagType_BOOL: return "BOOL"
+ case TagType_LONG: return "LONG"
+ case TagType_BINARY: return "BINARY"
+ }
+ return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+ switch s {
+ case "STRING": return TagType_STRING, nil
+ case "DOUBLE": return TagType_DOUBLE, nil
+ case "BOOL": return TagType_BOOL, nil
+ case "LONG": return TagType_LONG, nil
+ case "BINARY": return TagType_BINARY, nil
+ }
+ return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+q, err := TagTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *TagType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = TagType(v)
+return nil
+}
+
+func (p * TagType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+return int64(*p), nil
+}
+type SpanRefType int64
+const (
+ SpanRefType_CHILD_OF SpanRefType = 0
+ SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+ switch p {
+ case SpanRefType_CHILD_OF: return "CHILD_OF"
+ case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM"
+ }
+ return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+ switch s {
+ case "CHILD_OF": return SpanRefType_CHILD_OF, nil
+ case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil
+ }
+ return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+q, err := SpanRefTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SpanRefType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SpanRefType(v)
+return nil
+}
+
+func (p * SpanRefType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+return int64(*p), nil
+}
+// Attributes:
+// - Key
+// - VType
+// - VStr
+// - VDouble
+// - VBool
+// - VLong
+// - VBinary
+type Tag struct {
+ Key string `thrift:"key,1,required" db:"key" json:"key"`
+ VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
+ VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
+ VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
+ VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
+ VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
+ VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+ return &Tag{}
+}
+
+
+func (p *Tag) GetKey() string {
+ return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+ return p.VType
+}
+var Tag_VStr_DEFAULT string
+func (p *Tag) GetVStr() string {
+ if !p.IsSetVStr() {
+ return Tag_VStr_DEFAULT
+ }
+return *p.VStr
+}
+var Tag_VDouble_DEFAULT float64
+func (p *Tag) GetVDouble() float64 {
+ if !p.IsSetVDouble() {
+ return Tag_VDouble_DEFAULT
+ }
+return *p.VDouble
+}
+var Tag_VBool_DEFAULT bool
+func (p *Tag) GetVBool() bool {
+ if !p.IsSetVBool() {
+ return Tag_VBool_DEFAULT
+ }
+return *p.VBool
+}
+var Tag_VLong_DEFAULT int64
+func (p *Tag) GetVLong() int64 {
+ if !p.IsSetVLong() {
+ return Tag_VLong_DEFAULT
+ }
+return *p.VLong
+}
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+ return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+ return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+ return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+ return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+ return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+ return p.VBinary != nil
+}
+
+func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetKey bool = false;
+ var issetVType bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetKey = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetVType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField7(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetKey{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
+ }
+ if !issetVType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"));
+ }
+ return nil
+}
+
+func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Key = v
+}
+ return nil
+}
+
+func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ temp := TagType(v)
+ p.VType = temp
+}
+ return nil
+}
+
+func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.VStr = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.VDouble = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.VBool = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+} else {
+ p.VLong = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+} else {
+ p.VBinary = v
+}
+ return nil
+}
+
+func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+ return err
+}
+
+func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) }
+ return err
+}
+
+func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVStr() {
+ if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVDouble() {
+ if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBool() {
+ if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVLong() {
+ if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBinary() {
+ if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) Equals(other *Tag) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Key != other.Key { return false }
+ if p.VType != other.VType { return false }
+ if p.VStr != other.VStr {
+ if p.VStr == nil || other.VStr == nil {
+ return false
+ }
+ if (*p.VStr) != (*other.VStr) { return false }
+ }
+ if p.VDouble != other.VDouble {
+ if p.VDouble == nil || other.VDouble == nil {
+ return false
+ }
+ if (*p.VDouble) != (*other.VDouble) { return false }
+ }
+ if p.VBool != other.VBool {
+ if p.VBool == nil || other.VBool == nil {
+ return false
+ }
+ if (*p.VBool) != (*other.VBool) { return false }
+ }
+ if p.VLong != other.VLong {
+ if p.VLong == nil || other.VLong == nil {
+ return false
+ }
+ if (*p.VLong) != (*other.VLong) { return false }
+ }
+ if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false }
+ return true
+}
+
+func (p *Tag) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Tag(%+v)", *p)
+}
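+
+// Illustrative sketch, not part of the generated output: Key and VType are
+// required, while the value fields are optional pointers, so a caller sets
+// only the variant matching VType. The TagType_* constants are assumed to
+// follow the standard Thrift Go enum naming used elsewhere in this file.
+func exampleTags() []*Tag {
+  host := "api-01"
+  port := int64(8443)
+  return []*Tag{
+    {Key: "hostname", VType: TagType_STRING, VStr: &host},
+    {Key: "port", VType: TagType_LONG, VLong: &port},
+  }
+}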
+
+// Attributes:
+// - Timestamp
+// - Fields
+type Log struct {
+ Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
+ Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
+}
+
+func NewLog() *Log {
+ return &Log{}
+}
+
+
+func (p *Log) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+ return p.Fields
+}
+func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTimestamp bool = false;
+ var issetFields bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetTimestamp = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetFields = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTimestamp{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"));
+ }
+ if !issetFields{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"));
+ }
+ return nil
+}
+
+func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Timestamp = v
+}
+ return nil
+}
+
+func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Fields = tSlice
+ for i := 0; i < size; i ++ {
+ _elem0 := &Tag{}
+ if err := _elem0.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Fields = append(p.Fields, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+ return err
+}
+
+func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Fields {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) }
+ return err
+}
+
+func (p *Log) Equals(other *Log) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Timestamp != other.Timestamp { return false }
+ if len(p.Fields) != len(other.Fields) { return false }
+ for i, _tgt := range p.Fields {
+ _src1 := other.Fields[i]
+ if !_tgt.Equals(_src1) { return false }
+ }
+ return true
+}
+
+func (p *Log) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+// - RefType
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+type SpanRef struct {
+ RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
+ TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+ return &SpanRef{}
+}
+
+
+func (p *SpanRef) GetRefType() SpanRefType {
+ return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+ return p.SpanId
+}
+func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetRefType bool = false;
+ var issetTraceIdLow bool = false;
+ var issetTraceIdHigh bool = false;
+ var issetSpanId bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetRefType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetRefType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"));
+ }
+ if !issetTraceIdLow{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+ }
+ if !issetTraceIdHigh{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+ }
+ if !issetSpanId{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+ }
+ return nil
+}
+
+func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ temp := SpanRefType(v)
+ p.RefType = temp
+}
+ return nil
+}
+
+func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.TraceIdLow = v
+}
+ return nil
+}
+
+func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.TraceIdHigh = v
+}
+ return nil
+}
+
+func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.SpanId = v
+}
+ return nil
+}
+
+func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) Equals(other *SpanRef) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.RefType != other.RefType { return false }
+ if p.TraceIdLow != other.TraceIdLow { return false }
+ if p.TraceIdHigh != other.TraceIdHigh { return false }
+ if p.SpanId != other.SpanId { return false }
+ return true
+}
+
+func (p *SpanRef) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SpanRef(%+v)", *p)
+}
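+
+// Illustrative sketch, not part of the generated output: Jaeger carries a
+// 128-bit trace ID as two signed 64-bit halves, so a CHILD_OF reference to a
+// parent span could be built as below. SpanRefType_CHILD_OF is assumed to
+// follow the standard Thrift Go enum naming for the SpanRefType constants.
+func exampleSpanRef(parentSpanID int64) *SpanRef {
+  return &SpanRef{
+    RefType:     SpanRefType_CHILD_OF,
+    TraceIdHigh: 0x463ac35c9f6413ad, // upper 64 bits of the trace ID
+    TraceIdLow:  0x48485a3953bb6124, // lower 64 bits of the trace ID
+    SpanId:      parentSpanID,
+  }
+}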
+
+// Attributes:
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+// - ParentSpanId
+// - OperationName
+// - References
+// - Flags
+// - StartTime
+// - Duration
+// - Tags
+// - Logs
+type Span struct {
+ TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
+ ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
+ OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
+ References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
+ Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
+ StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
+ Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
+ Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
+ Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+
+func (p *Span) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+ return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+ return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+ return p.OperationName
+}
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+ return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+ return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+ return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+ return p.Duration
+}
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+ return p.Tags
+}
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+ return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+ return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+ return p.Logs != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTraceIdLow bool = false;
+ var issetTraceIdHigh bool = false;
+ var issetSpanId bool = false;
+ var issetParentSpanId bool = false;
+ var issetOperationName bool = false;
+ var issetFlags bool = false;
+ var issetStartTime bool = false;
+ var issetDuration bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ issetParentSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ issetOperationName = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField7(ctx, iprot); err != nil {
+ return err
+ }
+ issetFlags = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField8(ctx, iprot); err != nil {
+ return err
+ }
+ issetStartTime = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField9(ctx, iprot); err != nil {
+ return err
+ }
+ issetDuration = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField10(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField11(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTraceIdLow{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+ }
+ if !issetTraceIdHigh{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+ }
+ if !issetSpanId{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+ }
+ if !issetParentSpanId{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"));
+ }
+ if !issetOperationName{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"));
+ }
+ if !issetFlags{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"));
+ }
+ if !issetStartTime{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"));
+ }
+ if !issetDuration{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"));
+ }
+ return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.TraceIdLow = v
+}
+ return nil
+}
+
+func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.TraceIdHigh = v
+}
+ return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.SpanId = v
+}
+ return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.ParentSpanId = v
+}
+ return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.OperationName = v
+}
+ return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*SpanRef, 0, size)
+ p.References = tSlice
+ for i := 0; i < size; i ++ {
+ _elem2 := &SpanRef{}
+ if err := _elem2.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.References = append(p.References, _elem2)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+} else {
+ p.Flags = v
+}
+ return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 8: ", err)
+} else {
+ p.StartTime = v
+}
+ return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+} else {
+ p.Duration = v
+}
+ return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i ++ {
+ _elem3 := &Tag{}
+ if err := _elem3.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+ }
+ p.Tags = append(p.Tags, _elem3)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Log, 0, size)
+ p.Logs = tSlice
+ for i := 0; i < size; i ++ {
+ _elem4 := &Log{}
+ if err := _elem4.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Logs = append(p.Logs, _elem4)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ if err := p.writeField9(ctx, oprot); err != nil { return err }
+ if err := p.writeField10(ctx, oprot); err != nil { return err }
+ if err := p.writeField11(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetReferences() {
+ if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.References {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetLogs() {
+ if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Logs {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.TraceIdLow != other.TraceIdLow { return false }
+ if p.TraceIdHigh != other.TraceIdHigh { return false }
+ if p.SpanId != other.SpanId { return false }
+ if p.ParentSpanId != other.ParentSpanId { return false }
+ if p.OperationName != other.OperationName { return false }
+ if len(p.References) != len(other.References) { return false }
+ for i, _tgt := range p.References {
+ _src5 := other.References[i]
+ if !_tgt.Equals(_src5) { return false }
+ }
+ if p.Flags != other.Flags { return false }
+ if p.StartTime != other.StartTime { return false }
+ if p.Duration != other.Duration { return false }
+ if len(p.Tags) != len(other.Tags) { return false }
+ for i, _tgt := range p.Tags {
+ _src6 := other.Tags[i]
+ if !_tgt.Equals(_src6) { return false }
+ }
+ if len(p.Logs) != len(other.Logs) { return false }
+ for i, _tgt := range p.Logs {
+ _src7 := other.Logs[i]
+ if !_tgt.Equals(_src7) { return false }
+ }
+ return true
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
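+
+// Illustrative sketch, not part of the generated output: every scalar field
+// on Span is required by the schema above, while References, Tags and Logs
+// are optional and stay off the wire when nil (see the IsSet* checks in the
+// writeField helpers). StartTime and Duration are microseconds in Jaeger's
+// Thrift model, and flag bit 1 marks the span as sampled.
+func exampleSpan(startMicros, durationMicros int64) *Span {
+  return &Span{
+    TraceIdLow:    0x48485a3953bb6124,
+    TraceIdHigh:   0x463ac35c9f6413ad,
+    SpanId:        0x1c29e2f8d5a0417b,
+    ParentSpanId:  0, // zero means no parent span
+    OperationName: "GET /containers/json",
+    Flags:         1, // sampled
+    StartTime:     startMicros,
+    Duration:      durationMicros,
+  }
+}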
+
+// Attributes:
+// - ServiceName
+// - Tags
+type Process struct {
+ ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
+ Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+ return &Process{}
+}
+
+
+func (p *Process) GetServiceName() string {
+ return p.ServiceName
+}
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+ return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetServiceName bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetServiceName = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetServiceName{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"));
+ }
+ return nil
+}
+
+func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.ServiceName = v
+}
+ return nil
+}
+
+func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i ++ {
+ _elem8 := &Tag{}
+ if err := _elem8.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
+ }
+ p.Tags = append(p.Tags, _elem8)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+ return err
+}
+
+func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) }
+ }
+ return err
+}
+
+func (p *Process) Equals(other *Process) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.ServiceName != other.ServiceName { return false }
+ if len(p.Tags) != len(other.Tags) { return false }
+ for i, _tgt := range p.Tags {
+ _src9 := other.Tags[i]
+ if !_tgt.Equals(_src9) { return false }
+ }
+ return true
+}
+
+func (p *Process) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+// - FullQueueDroppedSpans
+// - TooLargeDroppedSpans
+// - FailedToEmitSpans
+type ClientStats struct {
+ FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
+ TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
+ FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+ return &ClientStats{}
+}
+
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+ return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+ return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+ return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetFullQueueDroppedSpans bool = false;
+ var issetTooLargeDroppedSpans bool = false;
+ var issetFailedToEmitSpans bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetFullQueueDroppedSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTooLargeDroppedSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetFailedToEmitSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetFullQueueDroppedSpans{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"));
+ }
+ if !issetTooLargeDroppedSpans{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"));
+ }
+ if !issetFailedToEmitSpans{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"));
+ }
+ return nil
+}
+
+func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.FullQueueDroppedSpans = v
+}
+ return nil
+}
+
+func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.TooLargeDroppedSpans = v
+}
+ return nil
+}
+
+func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.FailedToEmitSpans = v
+}
+ return nil
+}
+
+func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) Equals(other *ClientStats) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false }
+ if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false }
+ if p.FailedToEmitSpans != other.FailedToEmitSpans { return false }
+ return true
+}
+
+func (p *ClientStats) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
+// Attributes:
+// - Process
+// - Spans
+// - SeqNo
+// - Stats
+type Batch struct {
+ Process *Process `thrift:"process,1,required" db:"process" json:"process"`
+ Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
+ SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
+ Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
+}
+
+func NewBatch() *Batch {
+ return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+func (p *Batch) GetProcess() *Process {
+ if !p.IsSetProcess() {
+ return Batch_Process_DEFAULT
+ }
+return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+ return p.Spans
+}
+var Batch_SeqNo_DEFAULT int64
+func (p *Batch) GetSeqNo() int64 {
+ if !p.IsSetSeqNo() {
+ return Batch_SeqNo_DEFAULT
+ }
+return *p.SeqNo
+}
+var Batch_Stats_DEFAULT *ClientStats
+func (p *Batch) GetStats() *ClientStats {
+ if !p.IsSetStats() {
+ return Batch_Stats_DEFAULT
+ }
+return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+ return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+ return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+ return p.Stats != nil
+}
+
+func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetProcess bool = false;
+ var issetSpans bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetProcess = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetProcess{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"));
+ }
+ if !issetSpans{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"));
+ }
+ return nil
+}
+
+func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Process = &Process{}
+ if err := p.Process.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+ }
+ return nil
+}
+
+func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i ++ {
+ _elem10 := &Span{}
+ if err := _elem10.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+ }
+ p.Spans = append(p.Spans, _elem10)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.SeqNo = &v
+}
+ return nil
+}
+
+func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Stats = &ClientStats{}
+ if err := p.Stats.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+ }
+ return nil
+}
+
+func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) }
+ if err := p.Process.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) }
+ return err
+}
+
+func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) }
+ return err
+}
+
+func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSeqNo() {
+ if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) }
+ }
+ return err
+}
+
+func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetStats() {
+ if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) }
+ if err := p.Stats.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) }
+ }
+ return err
+}
+
+func (p *Batch) Equals(other *Batch) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.Process.Equals(other.Process) { return false }
+ if len(p.Spans) != len(other.Spans) { return false }
+ for i, _tgt := range p.Spans {
+ _src11 := other.Spans[i]
+ if !_tgt.Equals(_src11) { return false }
+ }
+ if p.SeqNo != other.SeqNo {
+ if p.SeqNo == nil || other.SeqNo == nil {
+ return false
+ }
+ if (*p.SeqNo) != (*other.SeqNo) { return false }
+ }
+ if !p.Stats.Equals(other.Stats) { return false }
+ return true
+}
+
+func (p *Batch) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type BatchSubmitResponse struct {
+ Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+ return &BatchSubmitResponse{}
+}
+
+
+func (p *BatchSubmitResponse) GetOk() bool {
+ return p.Ok
+}
+func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ok = v
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+ return err
+}
+
+func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ok != other.Ok { return false }
+ return true
+}
+
+func (p *BatchSubmitResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
+
+type Collector interface {
+ // Parameters:
+ // - Batches
+ SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
+}
+
+type CollectorClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
+ return &CollectorClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
+ return &CollectorClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewCollectorClient(c thrift.TClient) *CollectorClient {
+ return &CollectorClient{
+ c: c,
+ }
+}
+
+func (p *CollectorClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - Batches
+func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
+ var _args12 CollectorSubmitBatchesArgs
+ _args12.Batches = batches
+ var _result14 CollectorSubmitBatchesResult
+ var _meta13 thrift.ResponseMeta
+ _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
+ p.SetLastResponseMeta_(_meta13)
+ if _err != nil {
+ return
+ }
+ return _result14.GetSuccess(), nil
+}
+
+type CollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Collector
+}
+
+func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewCollectorProcessor(handler Collector) *CollectorProcessor {
+
+ self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler}
+ return self15
+}
+
+func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x16.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x16
+
+}
+
+type collectorProcessorSubmitBatches struct {
+ handler Collector
+}
+
+func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := CollectorSubmitBatchesArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to do server side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
+
+ result := CollectorSubmitBatchesResult{}
+ var retval []*BatchSubmitResponse
+ if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Batches
+type CollectorSubmitBatchesArgs struct {
+ Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
+}
+
+func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
+ return &CollectorSubmitBatchesArgs{}
+}
+
+
+func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
+ return p.Batches
+}
+func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Batch, 0, size)
+ p.Batches = tSlice
+ for i := 0; i < size; i++ {
+ _elem17 := &Batch{}
+ if err := _elem17.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
+ }
+ p.Batches = append(p.Batches, _elem17)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Batches {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) }
+ return err
+}
+
+func (p *CollectorSubmitBatchesArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type CollectorSubmitBatchesResult struct {
+ Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
+ return &CollectorSubmitBatchesResult{}
+}
+
+var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
+
+func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
+ return p.Success
+}
+func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BatchSubmitResponse, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem18 := &BatchSubmitResponse{}
+ if err := _elem18.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
+ }
+ p.Success = append(p.Success, _elem18)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *CollectorSubmitBatchesResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
+}
+
+
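A minimal usage sketch of the regenerated, context-aware Collector client above, assuming a caller-supplied thrift.TClient (transport and protocol wiring is runtime-specific and omitted here); submitOneBatch, its arguments, and the field values are illustrative only and not part of the generated API.

package example

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// submitOneBatch is an illustrative sketch, not generated code: it wraps a
// caller-supplied thrift.TClient in the regenerated CollectorClient and
// submits a single Batch, checking each BatchSubmitResponse.
func submitOneBatch(ctx context.Context, c thrift.TClient, serviceName string, spans []*jaeger.Span) error {
	client := jaeger.NewCollectorClient(c)
	batch := &jaeger.Batch{
		Process: &jaeger.Process{ServiceName: serviceName}, // required field
		Spans:   spans,                                     // required field
	}
	resps, err := client.SubmitBatches(ctx, []*jaeger.Batch{batch})
	if err != nil {
		return err
	}
	for _, r := range resps {
		if !r.GetOk() {
			return fmt.Errorf("collector rejected batch for %s", serviceName)
		}
	}
	return nil
}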
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
deleted file mode 100644
index e69c6d603..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
+++ /dev/null
@@ -1,2106 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package jaeger
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type TagType int64
-
-const (
- TagType_STRING TagType = 0
- TagType_DOUBLE TagType = 1
- TagType_BOOL TagType = 2
- TagType_LONG TagType = 3
- TagType_BINARY TagType = 4
-)
-
-func (p TagType) String() string {
- switch p {
- case TagType_STRING:
- return "STRING"
- case TagType_DOUBLE:
- return "DOUBLE"
- case TagType_BOOL:
- return "BOOL"
- case TagType_LONG:
- return "LONG"
- case TagType_BINARY:
- return "BINARY"
- }
- return "<UNSET>"
-}
-
-func TagTypeFromString(s string) (TagType, error) {
- switch s {
- case "STRING":
- return TagType_STRING, nil
- case "DOUBLE":
- return TagType_DOUBLE, nil
- case "BOOL":
- return TagType_BOOL, nil
- case "LONG":
- return TagType_LONG, nil
- case "BINARY":
- return TagType_BINARY, nil
- }
- return TagType(0), fmt.Errorf("not a valid TagType string")
-}
-
-func TagTypePtr(v TagType) *TagType { return &v }
-
-func (p TagType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *TagType) UnmarshalText(text []byte) error {
- q, err := TagTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-type SpanRefType int64
-
-const (
- SpanRefType_CHILD_OF SpanRefType = 0
- SpanRefType_FOLLOWS_FROM SpanRefType = 1
-)
-
-func (p SpanRefType) String() string {
- switch p {
- case SpanRefType_CHILD_OF:
- return "CHILD_OF"
- case SpanRefType_FOLLOWS_FROM:
- return "FOLLOWS_FROM"
- }
- return "<UNSET>"
-}
-
-func SpanRefTypeFromString(s string) (SpanRefType, error) {
- switch s {
- case "CHILD_OF":
- return SpanRefType_CHILD_OF, nil
- case "FOLLOWS_FROM":
- return SpanRefType_FOLLOWS_FROM, nil
- }
- return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
-}
-
-func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
-
-func (p SpanRefType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *SpanRefType) UnmarshalText(text []byte) error {
- q, err := SpanRefTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-// Attributes:
-// - Key
-// - VType
-// - VStr
-// - VDouble
-// - VBool
-// - VLong
-// - VBinary
-type Tag struct {
- Key string `thrift:"key,1,required" json:"key"`
- VType TagType `thrift:"vType,2,required" json:"vType"`
- VStr *string `thrift:"vStr,3" json:"vStr,omitempty"`
- VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"`
- VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"`
- VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"`
- VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"`
-}
-
-func NewTag() *Tag {
- return &Tag{}
-}
-
-func (p *Tag) GetKey() string {
- return p.Key
-}
-
-func (p *Tag) GetVType() TagType {
- return p.VType
-}
-
-var Tag_VStr_DEFAULT string
-
-func (p *Tag) GetVStr() string {
- if !p.IsSetVStr() {
- return Tag_VStr_DEFAULT
- }
- return *p.VStr
-}
-
-var Tag_VDouble_DEFAULT float64
-
-func (p *Tag) GetVDouble() float64 {
- if !p.IsSetVDouble() {
- return Tag_VDouble_DEFAULT
- }
- return *p.VDouble
-}
-
-var Tag_VBool_DEFAULT bool
-
-func (p *Tag) GetVBool() bool {
- if !p.IsSetVBool() {
- return Tag_VBool_DEFAULT
- }
- return *p.VBool
-}
-
-var Tag_VLong_DEFAULT int64
-
-func (p *Tag) GetVLong() int64 {
- if !p.IsSetVLong() {
- return Tag_VLong_DEFAULT
- }
- return *p.VLong
-}
-
-var Tag_VBinary_DEFAULT []byte
-
-func (p *Tag) GetVBinary() []byte {
- return p.VBinary
-}
-func (p *Tag) IsSetVStr() bool {
- return p.VStr != nil
-}
-
-func (p *Tag) IsSetVDouble() bool {
- return p.VDouble != nil
-}
-
-func (p *Tag) IsSetVBool() bool {
- return p.VBool != nil
-}
-
-func (p *Tag) IsSetVLong() bool {
- return p.VLong != nil
-}
-
-func (p *Tag) IsSetVBinary() bool {
- return p.VBinary != nil
-}
-
-func (p *Tag) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetKey bool = false
- var issetVType bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetKey = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetVType = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- case 5:
- if err := p.readField5(iprot); err != nil {
- return err
- }
- case 6:
- if err := p.readField6(iprot); err != nil {
- return err
- }
- case 7:
- if err := p.readField7(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetKey {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
- }
- if !issetVType {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
- }
- return nil
-}
-
-func (p *Tag) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Key = v
- }
- return nil
-}
-
-func (p *Tag) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- temp := TagType(v)
- p.VType = temp
- }
- return nil
-}
-
-func (p *Tag) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.VStr = &v
- }
- return nil
-}
-
-func (p *Tag) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.VDouble = &v
- }
- return nil
-}
-
-func (p *Tag) readField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.VBool = &v
- }
- return nil
-}
-
-func (p *Tag) readField6(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 6: ", err)
- } else {
- p.VLong = &v
- }
- return nil
-}
-
-func (p *Tag) readField7(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
- } else {
- p.VBinary = v
- }
- return nil
-}
-
-func (p *Tag) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Tag"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := p.writeField5(oprot); err != nil {
- return err
- }
- if err := p.writeField6(oprot); err != nil {
- return err
- }
- if err := p.writeField7(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
- }
- if err := oprot.WriteString(string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
- }
- return err
-}
-
-func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.VType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
- }
- return err
-}
-
-func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetVStr() {
- if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
- }
- if err := oprot.WriteString(string(*p.VStr)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetVDouble() {
- if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
- }
- if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetVBool() {
- if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
- }
- if err := oprot.WriteBool(bool(*p.VBool)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) {
- if p.IsSetVLong() {
- if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.VLong)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) {
- if p.IsSetVBinary() {
- if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
- }
- if err := oprot.WriteBinary(p.VBinary); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Tag(%+v)", *p)
-}
-
-// Attributes:
-// - Timestamp
-// - Fields
-type Log struct {
- Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"`
- Fields []*Tag `thrift:"fields,2,required" json:"fields"`
-}
-
-func NewLog() *Log {
- return &Log{}
-}
-
-func (p *Log) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Log) GetFields() []*Tag {
- return p.Fields
-}
-func (p *Log) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTimestamp bool = false
- var issetFields bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetTimestamp = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetFields = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTimestamp {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
- }
- if !issetFields {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
- }
- return nil
-}
-
-func (p *Log) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Timestamp = v
- }
- return nil
-}
-
-func (p *Log) readField2(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Fields = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &Tag{}
- if err := _elem0.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Fields = append(p.Fields, _elem0)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Log) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Log"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Log) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
- }
- return err
-}
-
-func (p *Log) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Fields {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
- }
- return err
-}
-
-func (p *Log) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Log(%+v)", *p)
-}
-
-// Attributes:
-// - RefType
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-type SpanRef struct {
- RefType SpanRefType `thrift:"refType,1,required" json:"refType"`
- TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,4,required" json:"spanId"`
-}
-
-func NewSpanRef() *SpanRef {
- return &SpanRef{}
-}
-
-func (p *SpanRef) GetRefType() SpanRefType {
- return p.RefType
-}
-
-func (p *SpanRef) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *SpanRef) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *SpanRef) GetSpanId() int64 {
- return p.SpanId
-}
-func (p *SpanRef) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetRefType bool = false
- var issetTraceIdLow bool = false
- var issetTraceIdHigh bool = false
- var issetSpanId bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetRefType = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- issetSpanId = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetRefType {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
- }
- if !issetTraceIdLow {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
- }
- if !issetTraceIdHigh {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
- }
- if !issetSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
- }
- return nil
-}
-
-func (p *SpanRef) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- temp := SpanRefType(v)
- p.RefType = temp
- }
- return nil
-}
-
-func (p *SpanRef) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TraceIdLow = v
- }
- return nil
-}
-
-func (p *SpanRef) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.TraceIdHigh = v
- }
- return nil
-}
-
-func (p *SpanRef) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.SpanId = v
- }
- return nil
-}
-
-func (p *SpanRef) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("SpanRef"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.RefType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("SpanRef(%+v)", *p)
-}
-
-// Attributes:
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-// - ParentSpanId
-// - OperationName
-// - References
-// - Flags
-// - StartTime
-// - Duration
-// - Tags
-// - Logs
-type Span struct {
- TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,3,required" json:"spanId"`
- ParentSpanId int64 `thrift:"parentSpanId,4,required" json:"parentSpanId"`
- OperationName string `thrift:"operationName,5,required" json:"operationName"`
- References []*SpanRef `thrift:"references,6" json:"references,omitempty"`
- Flags int32 `thrift:"flags,7,required" json:"flags"`
- StartTime int64 `thrift:"startTime,8,required" json:"startTime"`
- Duration int64 `thrift:"duration,9,required" json:"duration"`
- Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"`
- Logs []*Log `thrift:"logs,11" json:"logs,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-func (p *Span) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *Span) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *Span) GetSpanId() int64 {
- return p.SpanId
-}
-
-func (p *Span) GetParentSpanId() int64 {
- return p.ParentSpanId
-}
-
-func (p *Span) GetOperationName() string {
- return p.OperationName
-}
-
-var Span_References_DEFAULT []*SpanRef
-
-func (p *Span) GetReferences() []*SpanRef {
- return p.References
-}
-
-func (p *Span) GetFlags() int32 {
- return p.Flags
-}
-
-func (p *Span) GetStartTime() int64 {
- return p.StartTime
-}
-
-func (p *Span) GetDuration() int64 {
- return p.Duration
-}
-
-var Span_Tags_DEFAULT []*Tag
-
-func (p *Span) GetTags() []*Tag {
- return p.Tags
-}
-
-var Span_Logs_DEFAULT []*Log
-
-func (p *Span) GetLogs() []*Log {
- return p.Logs
-}
-func (p *Span) IsSetReferences() bool {
- return p.References != nil
-}
-
-func (p *Span) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Span) IsSetLogs() bool {
- return p.Logs != nil
-}
-
-func (p *Span) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTraceIdLow bool = false
- var issetTraceIdHigh bool = false
- var issetSpanId bool = false
- var issetParentSpanId bool = false
- var issetOperationName bool = false
- var issetFlags bool = false
- var issetStartTime bool = false
- var issetDuration bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- issetSpanId = true
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- issetParentSpanId = true
- case 5:
- if err := p.readField5(iprot); err != nil {
- return err
- }
- issetOperationName = true
- case 6:
- if err := p.readField6(iprot); err != nil {
- return err
- }
- case 7:
- if err := p.readField7(iprot); err != nil {
- return err
- }
- issetFlags = true
- case 8:
- if err := p.readField8(iprot); err != nil {
- return err
- }
- issetStartTime = true
- case 9:
- if err := p.readField9(iprot); err != nil {
- return err
- }
- issetDuration = true
- case 10:
- if err := p.readField10(iprot); err != nil {
- return err
- }
- case 11:
- if err := p.readField11(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTraceIdLow {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
- }
- if !issetTraceIdHigh {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
- }
- if !issetSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
- }
- if !issetParentSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
- }
- if !issetOperationName {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
- }
- if !issetFlags {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
- }
- if !issetStartTime {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
- }
- if !issetDuration {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
- }
- return nil
-}
-
-func (p *Span) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.TraceIdLow = v
- }
- return nil
-}
-
-func (p *Span) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TraceIdHigh = v
- }
- return nil
-}
-
-func (p *Span) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.SpanId = v
- }
- return nil
-}
-
-func (p *Span) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.ParentSpanId = v
- }
- return nil
-}
-
-func (p *Span) readField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.OperationName = v
- }
- return nil
-}
-
-func (p *Span) readField6(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*SpanRef, 0, size)
- p.References = tSlice
- for i := 0; i < size; i++ {
- _elem1 := &SpanRef{}
- if err := _elem1.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
- }
- p.References = append(p.References, _elem1)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField7(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
- } else {
- p.Flags = v
- }
- return nil
-}
-
-func (p *Span) readField8(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 8: ", err)
- } else {
- p.StartTime = v
- }
- return nil
-}
-
-func (p *Span) readField9(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
- } else {
- p.Duration = v
- }
- return nil
-}
-
-func (p *Span) readField10(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i++ {
- _elem2 := &Tag{}
- if err := _elem2.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
- }
- p.Tags = append(p.Tags, _elem2)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField11(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Log, 0, size)
- p.Logs = tSlice
- for i := 0; i < size; i++ {
- _elem3 := &Log{}
- if err := _elem3.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
- }
- p.Logs = append(p.Logs, _elem3)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := p.writeField5(oprot); err != nil {
- return err
- }
- if err := p.writeField6(oprot); err != nil {
- return err
- }
- if err := p.writeField7(oprot); err != nil {
- return err
- }
- if err := p.writeField8(oprot); err != nil {
- return err
- }
- if err := p.writeField9(oprot); err != nil {
- return err
- }
- if err := p.writeField10(oprot); err != nil {
- return err
- }
- if err := p.writeField11(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
- }
- if err := oprot.WriteString(string(p.OperationName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
- if p.IsSetReferences() {
- if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.References {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField7(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.Flags)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.StartTime)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
- if p.IsSetLogs() {
- if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Logs {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-// - ServiceName
-// - Tags
-type Process struct {
- ServiceName string `thrift:"serviceName,1,required" json:"serviceName"`
- Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"`
-}
-
-func NewProcess() *Process {
- return &Process{}
-}
-
-func (p *Process) GetServiceName() string {
- return p.ServiceName
-}
-
-var Process_Tags_DEFAULT []*Tag
-
-func (p *Process) GetTags() []*Tag {
- return p.Tags
-}
-func (p *Process) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Process) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetServiceName bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetServiceName = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetServiceName {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
- }
- return nil
-}
-
-func (p *Process) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *Process) readField2(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i++ {
- _elem4 := &Tag{}
- if err := _elem4.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Tags = append(p.Tags, _elem4)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Process) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Process"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Process) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
- }
- if err := oprot.WriteString(string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
- }
- return err
-}
-
-func (p *Process) writeField2(oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
- }
- }
- return err
-}
-
-func (p *Process) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Process(%+v)", *p)
-}
-
-// Attributes:
-// - FullQueueDroppedSpans
-// - TooLargeDroppedSpans
-// - FailedToEmitSpans
-type ClientStats struct {
- FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" json:"fullQueueDroppedSpans"`
- TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" json:"tooLargeDroppedSpans"`
- FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" json:"failedToEmitSpans"`
-}
-
-func NewClientStats() *ClientStats {
- return &ClientStats{}
-}
-
-func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
- return p.FullQueueDroppedSpans
-}
-
-func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
- return p.TooLargeDroppedSpans
-}
-
-func (p *ClientStats) GetFailedToEmitSpans() int64 {
- return p.FailedToEmitSpans
-}
-func (p *ClientStats) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetFullQueueDroppedSpans bool = false
- var issetTooLargeDroppedSpans bool = false
- var issetFailedToEmitSpans bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetFullQueueDroppedSpans = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetTooLargeDroppedSpans = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- issetFailedToEmitSpans = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetFullQueueDroppedSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
- }
- if !issetTooLargeDroppedSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
- }
- if !issetFailedToEmitSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
- }
- return nil
-}
-
-func (p *ClientStats) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.FullQueueDroppedSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TooLargeDroppedSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.FailedToEmitSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("ClientStats"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ClientStats) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("fullQueueDroppedSpans", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.FullQueueDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("tooLargeDroppedSpans", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TooLargeDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("failedToEmitSpans", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.FailedToEmitSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ClientStats(%+v)", *p)
-}
-
-// Attributes:
-// - Process
-// - Spans
-// - SeqNo
-// - Stats
-type Batch struct {
- Process *Process `thrift:"process,1,required" json:"process"`
- Spans []*Span `thrift:"spans,2,required" json:"spans"`
- SeqNo *int64 `thrift:"seqNo,3" json:"seqNo,omitempty"`
- Stats *ClientStats `thrift:"stats,4" json:"stats,omitempty"`
-}
-
-func NewBatch() *Batch {
- return &Batch{}
-}
-
-var Batch_Process_DEFAULT *Process
-
-func (p *Batch) GetProcess() *Process {
- if !p.IsSetProcess() {
- return Batch_Process_DEFAULT
- }
- return p.Process
-}
-
-func (p *Batch) GetSpans() []*Span {
- return p.Spans
-}
-
-var Batch_SeqNo_DEFAULT int64
-
-func (p *Batch) GetSeqNo() int64 {
- if !p.IsSetSeqNo() {
- return Batch_SeqNo_DEFAULT
- }
- return *p.SeqNo
-}
-
-var Batch_Stats_DEFAULT *ClientStats
-
-func (p *Batch) GetStats() *ClientStats {
- if !p.IsSetStats() {
- return Batch_Stats_DEFAULT
- }
- return p.Stats
-}
-func (p *Batch) IsSetProcess() bool {
- return p.Process != nil
-}
-
-func (p *Batch) IsSetSeqNo() bool {
- return p.SeqNo != nil
-}
-
-func (p *Batch) IsSetStats() bool {
- return p.Stats != nil
-}
-
-func (p *Batch) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetProcess bool = false
- var issetSpans bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetProcess = true
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- issetSpans = true
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetProcess {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
- }
- if !issetSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
- }
- return nil
-}
-
-func (p *Batch) readField1(iprot thrift.TProtocol) error {
- p.Process = &Process{}
- if err := p.Process.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
- }
- return nil
-}
-
-func (p *Batch) readField2(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem5 := &Span{}
- if err := _elem5.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
- }
- p.Spans = append(p.Spans, _elem5)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Batch) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.SeqNo = &v
- }
- return nil
-}
-
-func (p *Batch) readField4(iprot thrift.TProtocol) error {
- p.Stats = &ClientStats{}
- if err := p.Stats.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
- }
- return nil
-}
-
-func (p *Batch) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Batch"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
- }
- if err := p.Process.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
- }
- return err
-}
-
-func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
- }
- return err
-}
-
-func (p *Batch) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetSeqNo() {
- if err := oprot.WriteFieldBegin("seqNo", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.SeqNo)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
- }
- }
- return err
-}
-
-func (p *Batch) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetStats() {
- if err := oprot.WriteFieldBegin("stats", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
- }
- if err := p.Stats.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
- }
- }
- return err
-}
-
-func (p *Batch) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Batch(%+v)", *p)
-}
-
-// Attributes:
-// - Ok
-type BatchSubmitResponse struct {
- Ok bool `thrift:"ok,1,required" json:"ok"`
-}
-
-func NewBatchSubmitResponse() *BatchSubmitResponse {
- return &BatchSubmitResponse{}
-}
-
-func (p *BatchSubmitResponse) GetOk() bool {
- return p.Ok
-}
-func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOk bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetOk = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ok = v
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
- }
- if err := oprot.WriteBool(bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
- }
- return err
-}
-
-func (p *BatchSubmitResponse) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 000000000..ebf43018f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
deleted file mode 100644
index 15583e56b..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
+++ /dev/null
@@ -1,1337 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type AnnotationType int64
-
-const (
- AnnotationType_BOOL AnnotationType = 0
- AnnotationType_BYTES AnnotationType = 1
- AnnotationType_I16 AnnotationType = 2
- AnnotationType_I32 AnnotationType = 3
- AnnotationType_I64 AnnotationType = 4
- AnnotationType_DOUBLE AnnotationType = 5
- AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
- switch p {
- case AnnotationType_BOOL:
- return "BOOL"
- case AnnotationType_BYTES:
- return "BYTES"
- case AnnotationType_I16:
- return "I16"
- case AnnotationType_I32:
- return "I32"
- case AnnotationType_I64:
- return "I64"
- case AnnotationType_DOUBLE:
- return "DOUBLE"
- case AnnotationType_STRING:
- return "STRING"
- }
- return "<UNSET>"
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
- switch s {
- case "BOOL":
- return AnnotationType_BOOL, nil
- case "BYTES":
- return AnnotationType_BYTES, nil
- case "I16":
- return AnnotationType_I16, nil
- case "I32":
- return AnnotationType_I32, nil
- case "I64":
- return AnnotationType_I64, nil
- case "DOUBLE":
- return AnnotationType_DOUBLE, nil
- case "STRING":
- return AnnotationType_STRING, nil
- }
- return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
- q, err := AnnotationTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-// - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-// - Port: IPv4 port
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-//
-// Conventionally, when the port isn't known, port = 0.
-// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
-type Endpoint struct {
- Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"`
- Port int16 `thrift:"port,2" json:"port"`
- ServiceName string `thrift:"service_name,3" json:"service_name"`
- Ipv6 []byte `thrift:"ipv6,4" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
- return &Endpoint{}
-}
-
-func (p *Endpoint) GetIpv4() int32 {
- return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
- return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
- return p.ServiceName
-}
-
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
- return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
- return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ipv4 = v
- }
- return nil
-}
-
-func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Port = v
- }
- return nil
-}
-
-func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *Endpoint) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.Ipv6 = v
- }
- return nil
-}
-
-func (p *Endpoint) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Endpoint"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
- }
- if err := oprot.WriteI16(int16(p.Port)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
- }
- if err := oprot.WriteString(string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetIpv6() {
- if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
- }
- if err := oprot.WriteBinary(p.Ipv6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
- }
- }
- return err
-}
-
-func (p *Endpoint) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Endpoint(%+v)", *p)
-}
-
-// An annotation is similar to a log statement. It includes a host field which
-// allows these events to be attributed properly, and also aggregatable.
-//
-// Attributes:
-// - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-// - Value
-// - Host: Always the host that recorded the event. By specifying the host you allow
-// rollup of all events (such as client requests to a service) by IP address.
-type Annotation struct {
- Timestamp int64 `thrift:"timestamp,1" json:"timestamp"`
- Value string `thrift:"value,2" json:"value"`
- Host *Endpoint `thrift:"host,3" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
- return &Annotation{}
-}
-
-func (p *Annotation) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
- return p.Value
-}
-
-var Annotation_Host_DEFAULT *Endpoint
-
-func (p *Annotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return Annotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *Annotation) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Annotation) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Timestamp = v
- }
- return nil
-}
-
-func (p *Annotation) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *Annotation) readField3(iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *Annotation) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Annotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteString(string(p.Value)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
- }
- if err := p.Host.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *Annotation) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of "http.uri" could the path to a resource in a
-// RPC call.
-//
-// Binary annotations of type STRING are always queryable, though more a
-// historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.uri" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-// - Key
-// - Value
-// - AnnotationType
-// - Host: The host that recorded tag, which allows you to differentiate between
-// multiple tags with the same key. There are two exceptions to this.
-//
-// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, or clients such as web browsers.
-type BinaryAnnotation struct {
- Key string `thrift:"key,1" json:"key"`
- Value []byte `thrift:"value,2" json:"value"`
- AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
- Host *Endpoint `thrift:"host,4" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
- return &BinaryAnnotation{}
-}
-
-func (p *BinaryAnnotation) GetKey() string {
- return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
- return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
- return p.AnnotationType
-}
-
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-
-func (p *BinaryAnnotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return BinaryAnnotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Key = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- temp := AnnotationType(v)
- p.AnnotationType = temp
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
- }
- if err := oprot.WriteString(string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteBinary(p.Value); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
- }
- if err := p.Host.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *BinaryAnnotation) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// The root span is where trace_id = id and parent_id = Nil. The root span is
-// usually the longest interval in the trace, starting with a SERVER_RECV
-// annotation and ending with a SERVER_SEND.
-//
-// Attributes:
-// - TraceID
-// - Name: Span name in lowercase, rpc method for example
-//
-// Conventionally, when the span name isn't known, name = "unknown".
-// - ID
-// - ParentID
-// - Annotations
-// - BinaryAnnotations
-// - Debug
-// - Timestamp: Microseconds from epoch of the creation of this span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// This field is optional for compatibility with old data: first-party span
-// stores are expected to support this at time of introduction.
-// - Duration: Measurement of duration in microseconds, used to support queries.
-//
-// This value should be set directly, where possible. Doing so encourages
-// precise measurement decoupled from problems of clocks, such as skew or NTP
-// updates causing time to move backwards.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
-// means the trace uses 128 bit traceIds instead of 64 bit.
-type Span struct {
- TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
- // unused field # 2
- Name string `thrift:"name,3" json:"name"`
- ID int64 `thrift:"id,4" json:"id"`
- ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"`
- Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
- // unused field # 7
- BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
- Debug bool `thrift:"debug,9" json:"debug,omitempty"`
- Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"`
- Duration *int64 `thrift:"duration,11" json:"duration,omitempty"`
- TraceIDHigh *int64 `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-func (p *Span) GetTraceID() int64 {
- return p.TraceID
-}
-
-func (p *Span) GetName() string {
- return p.Name
-}
-
-func (p *Span) GetID() int64 {
- return p.ID
-}
-
-var Span_ParentID_DEFAULT int64
-
-func (p *Span) GetParentID() int64 {
- if !p.IsSetParentID() {
- return Span_ParentID_DEFAULT
- }
- return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
- return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
- return p.BinaryAnnotations
-}
-
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
- return p.Debug
-}
-
-var Span_Timestamp_DEFAULT int64
-
-func (p *Span) GetTimestamp() int64 {
- if !p.IsSetTimestamp() {
- return Span_Timestamp_DEFAULT
- }
- return *p.Timestamp
-}
-
-var Span_Duration_DEFAULT int64
-
-func (p *Span) GetDuration() int64 {
- if !p.IsSetDuration() {
- return Span_Duration_DEFAULT
- }
- return *p.Duration
-}
-
-var Span_TraceIDHigh_DEFAULT int64
-
-func (p *Span) GetTraceIDHigh() int64 {
- if !p.IsSetTraceIDHigh() {
- return Span_TraceIDHigh_DEFAULT
- }
- return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
- return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
- return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
- return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
- return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
- return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- case 5:
- if err := p.readField5(iprot); err != nil {
- return err
- }
- case 6:
- if err := p.readField6(iprot); err != nil {
- return err
- }
- case 8:
- if err := p.readField8(iprot); err != nil {
- return err
- }
- case 9:
- if err := p.readField9(iprot); err != nil {
- return err
- }
- case 10:
- if err := p.readField10(iprot); err != nil {
- return err
- }
- case 11:
- if err := p.readField11(iprot); err != nil {
- return err
- }
- case 12:
- if err := p.readField12(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Span) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.TraceID = v
- }
- return nil
-}
-
-func (p *Span) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.Name = v
- }
- return nil
-}
-
-func (p *Span) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.ID = v
- }
- return nil
-}
-
-func (p *Span) readField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.ParentID = &v
- }
- return nil
-}
-
-func (p *Span) readField6(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Annotation, 0, size)
- p.Annotations = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &Annotation{}
- if err := _elem0.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Annotations = append(p.Annotations, _elem0)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField8(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BinaryAnnotation, 0, size)
- p.BinaryAnnotations = tSlice
- for i := 0; i < size; i++ {
- _elem1 := &BinaryAnnotation{}
- if err := _elem1.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
- }
- p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField9(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
- } else {
- p.Debug = v
- }
- return nil
-}
-
-func (p *Span) readField10(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 10: ", err)
- } else {
- p.Timestamp = &v
- }
- return nil
-}
-
-func (p *Span) readField11(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 11: ", err)
- } else {
- p.Duration = &v
- }
- return nil
-}
-
-func (p *Span) readField12(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 12: ", err)
- } else {
- p.TraceIDHigh = &v
- }
- return nil
-}
-
-func (p *Span) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := p.writeField5(oprot); err != nil {
- return err
- }
- if err := p.writeField6(oprot); err != nil {
- return err
- }
- if err := p.writeField8(oprot); err != nil {
- return err
- }
- if err := p.writeField9(oprot); err != nil {
- return err
- }
- if err := p.writeField10(oprot); err != nil {
- return err
- }
- if err := p.writeField11(oprot); err != nil {
- return err
- }
- if err := p.writeField12(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
- }
- if err := oprot.WriteString(string(p.Name)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.ID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetParentID() {
- if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Annotations {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.BinaryAnnotations {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
- if p.IsSetDebug() {
- if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
- }
- if err := oprot.WriteBool(bool(p.Debug)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
- if p.IsSetTimestamp() {
- if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
- if p.IsSetDuration() {
- if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
- if p.IsSetTraceIDHigh() {
- if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-// - Ok
-type Response struct {
- Ok bool `thrift:"ok,1,required" json:"ok"`
-}
-
-func NewResponse() *Response {
- return &Response{}
-}
-
-func (p *Response) GetOk() bool {
- return p.Ok
-}
-func (p *Response) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOk bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- issetOk = true
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
- }
- return nil
-}
-
-func (p *Response) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ok = v
- }
- return nil
-}
-
-func (p *Response) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Response"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Response) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
- }
- if err := oprot.WriteBool(bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
- }
- return err
-}
-
-func (p *Response) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Response(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
deleted file mode 100644
index 417e883d0..000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
+++ /dev/null
@@ -1,446 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
- "bytes"
- "fmt"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type ZipkinCollector interface {
- // Parameters:
- // - Spans
- SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
-}
-
-type ZipkinCollectorClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
-}
-
-func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
-}
-
-func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
-}
-
-// Parameters:
-// - Spans
-func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
- if err = p.sendSubmitZipkinBatch(spans); err != nil {
- return
- }
- return p.recvSubmitZipkinBatch()
-}
-
-func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := ZipkinCollectorSubmitZipkinBatchArgs{
- Spans: spans,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
-}
-
-func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "submitZipkinBatch" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
- return
- }
- if mTypeId == thrift.EXCEPTION {
- error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error3 error
- error3, err = error2.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error3
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
- return
- }
- result := ZipkinCollectorSubmitZipkinBatchResult{}
- if err = result.Read(iprot); err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- value = result.GetSuccess()
- return
-}
-
-type ZipkinCollectorProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler ZipkinCollector
-}
-
-func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
-
- self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
- return self4
-}
-
-func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x5.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x5
-
-}
-
-type zipkinCollectorProcessorSubmitZipkinBatch struct {
- handler ZipkinCollector
-}
-
-func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := ZipkinCollectorSubmitZipkinBatchArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- result := ZipkinCollectorSubmitZipkinBatchResult{}
- var retval []*Response
- var err2 error
- if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
- oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Spans
-type ZipkinCollectorSubmitZipkinBatchArgs struct {
- Spans []*Span `thrift:"spans,1" json:"spans"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
- return &ZipkinCollectorSubmitZipkinBatchArgs{}
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
- return p.Spans
-}
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem6 := &Span{}
- if err := _elem6.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
- }
- p.Spans = append(p.Spans, _elem6)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
- }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type ZipkinCollectorSubmitZipkinBatchResult struct {
- Success []*Response `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
- return &ZipkinCollectorSubmitZipkinBatchResult{}
-}
-
-var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
- return p.Success
-}
-func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if err := p.readField0(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Response, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i++ {
- _elem7 := &Response{}
- if err := _elem7.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
- }
- p.Success = append(p.Success, _elem7)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField0(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
index a53d46f0e..7a924b977 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
@@ -1,17 +1,20 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package zipkincore
-import (
+import(
"bytes"
+ "context"
"fmt"
+ "time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
var _ = bytes.Equal
const CLIENT_SEND = "cs"
@@ -33,3 +36,4 @@ const MESSAGE_ADDR = "ma"
func init() {
}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
new file mode 100644
index 000000000..b00ecd23f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
@@ -0,0 +1,1853 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+ "bytes"
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type AnnotationType int64
+const (
+ AnnotationType_BOOL AnnotationType = 0
+ AnnotationType_BYTES AnnotationType = 1
+ AnnotationType_I16 AnnotationType = 2
+ AnnotationType_I32 AnnotationType = 3
+ AnnotationType_I64 AnnotationType = 4
+ AnnotationType_DOUBLE AnnotationType = 5
+ AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+ switch p {
+ case AnnotationType_BOOL: return "BOOL"
+ case AnnotationType_BYTES: return "BYTES"
+ case AnnotationType_I16: return "I16"
+ case AnnotationType_I32: return "I32"
+ case AnnotationType_I64: return "I64"
+ case AnnotationType_DOUBLE: return "DOUBLE"
+ case AnnotationType_STRING: return "STRING"
+ }
+ return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+ switch s {
+ case "BOOL": return AnnotationType_BOOL, nil
+ case "BYTES": return AnnotationType_BYTES, nil
+ case "I16": return AnnotationType_I16, nil
+ case "I32": return AnnotationType_I32, nil
+ case "I64": return AnnotationType_I64, nil
+ case "DOUBLE": return AnnotationType_DOUBLE, nil
+ case "STRING": return AnnotationType_STRING, nil
+ }
+ return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+q, err := AnnotationTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = AnnotationType(v)
+return nil
+}
+
+func (p * AnnotationType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+return int64(*p), nil
+}
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+// - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+// - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
+type Endpoint struct {
+ Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+ Port int16 `thrift:"port,2" db:"port" json:"port"`
+ ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+ Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+ return &Endpoint{}
+}
+
+
+func (p *Endpoint) GetIpv4() int32 {
+ return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+ return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+ return p.ServiceName
+}
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+ return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+ return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I16 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Ipv4 = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Port = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.ServiceName = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.Ipv6 = v
+}
+ return nil
+}
+
+func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+ if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIpv6() {
+ if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+ }
+ return err
+}
+
+func (p *Endpoint) Equals(other *Endpoint) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ipv4 != other.Ipv4 { return false }
+ if p.Port != other.Port { return false }
+ if p.ServiceName != other.ServiceName { return false }
+ if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false }
+ return true
+}
+
+func (p *Endpoint) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also aggregatable.
+//
+// Attributes:
+// - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+// - Value
+// - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+ Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+ Value string `thrift:"value,2" db:"value" json:"value"`
+ Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+ return &Annotation{}
+}
+
+
+func (p *Annotation) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+ return p.Value
+}
+var Annotation_Host_DEFAULT *Endpoint
+func (p *Annotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return Annotation_Host_DEFAULT
+ }
+return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Timestamp = v
+}
+ return nil
+}
+
+func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Value = v
+}
+ return nil
+}
+
+func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+ return err
+}
+
+func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+ return err
+}
+
+func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+ if err := p.Host.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+ }
+ return err
+}
+
+func (p *Annotation) Equals(other *Annotation) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Timestamp != other.Timestamp { return false }
+ if p.Value != other.Value { return false }
+ if !p.Host.Equals(other.Host) { return false }
+ return true
+}
+
+func (p *Annotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in an
+// RPC call.
+//
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+// - Key
+// - Value
+// - AnnotationType
+// - Host: The host that recorded tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+ Key string `thrift:"key,1" db:"key" json:"key"`
+ Value []byte `thrift:"value,2" db:"value" json:"value"`
+ AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+ Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+ return &BinaryAnnotation{}
+}
+
+
+func (p *BinaryAnnotation) GetKey() string {
+ return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+ return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+ return p.AnnotationType
+}
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return BinaryAnnotation_Host_DEFAULT
+ }
+return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Key = v
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Value = v
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ temp := AnnotationType(v)
+ p.AnnotationType = temp
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.Value); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+ if err := p.Host.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Key != other.Key { return false }
+ if bytes.Compare(p.Value, other.Value) != 0 { return false }
+ if p.AnnotationType != other.AnnotationType { return false }
+ if !p.Host.Equals(other.Host) { return false }
+ return true
+}
+
+func (p *BinaryAnnotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+// - TraceID
+// - Name: Span name in lowercase, rpc method for example
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+// - ID
+// - ParentID
+// - Annotations
+// - BinaryAnnotations
+// - Debug
+// - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+// - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
+// means the trace uses 128 bit traceIds instead of 64 bit.
+type Span struct {
+ TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+ // unused field # 2
+ Name string `thrift:"name,3" db:"name" json:"name"`
+ ID int64 `thrift:"id,4" db:"id" json:"id"`
+ ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+ Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+ // unused field # 7
+ BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+ Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
+ Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+ Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+ TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+
+func (p *Span) GetTraceID() int64 {
+ return p.TraceID
+}
+
+func (p *Span) GetName() string {
+ return p.Name
+}
+
+func (p *Span) GetID() int64 {
+ return p.ID
+}
+var Span_ParentID_DEFAULT int64
+func (p *Span) GetParentID() int64 {
+ if !p.IsSetParentID() {
+ return Span_ParentID_DEFAULT
+ }
+return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+ return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+ return p.BinaryAnnotations
+}
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+ return p.Debug
+}
+var Span_Timestamp_DEFAULT int64
+func (p *Span) GetTimestamp() int64 {
+ if !p.IsSetTimestamp() {
+ return Span_Timestamp_DEFAULT
+ }
+return *p.Timestamp
+}
+var Span_Duration_DEFAULT int64
+func (p *Span) GetDuration() int64 {
+ if !p.IsSetDuration() {
+ return Span_Duration_DEFAULT
+ }
+return *p.Duration
+}
+var Span_TraceIDHigh_DEFAULT int64
+func (p *Span) GetTraceIDHigh() int64 {
+ if !p.IsSetTraceIDHigh() {
+ return Span_TraceIDHigh_DEFAULT
+ }
+return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+ return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+ return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+ return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+ return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+ return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField8(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField9(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField10(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField11(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField12(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.TraceID = v
+}
+ return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.Name = v
+}
+ return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.ID = v
+}
+ return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.ParentID = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Annotation, 0, size)
+ p.Annotations = tSlice
+ for i := 0; i < size; i ++ {
+ _elem0 := &Annotation{}
+ if err := _elem0.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Annotations = append(p.Annotations, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BinaryAnnotation, 0, size)
+ p.BinaryAnnotations = tSlice
+ for i := 0; i < size; i ++ {
+ _elem1 := &BinaryAnnotation{}
+ if err := _elem1.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+} else {
+ p.Debug = v
+}
+ return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 10: ", err)
+} else {
+ p.Timestamp = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 11: ", err)
+} else {
+ p.Duration = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 12: ", err)
+} else {
+ p.TraceIDHigh = &v
+}
+ return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ if err := p.writeField9(ctx, oprot); err != nil { return err }
+ if err := p.writeField10(ctx, oprot); err != nil { return err }
+ if err := p.writeField11(ctx, oprot); err != nil { return err }
+ if err := p.writeField12(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetParentID() {
+ if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Annotations {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.BinaryAnnotations {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDebug() {
+ if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimestamp() {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDuration() {
+ if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTraceIDHigh() {
+ if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.TraceID != other.TraceID { return false }
+ if p.Name != other.Name { return false }
+ if p.ID != other.ID { return false }
+ if p.ParentID != other.ParentID {
+ if p.ParentID == nil || other.ParentID == nil {
+ return false
+ }
+ if (*p.ParentID) != (*other.ParentID) { return false }
+ }
+ if len(p.Annotations) != len(other.Annotations) { return false }
+ for i, _tgt := range p.Annotations {
+ _src2 := other.Annotations[i]
+ if !_tgt.Equals(_src2) { return false }
+ }
+ if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false }
+ for i, _tgt := range p.BinaryAnnotations {
+ _src3 := other.BinaryAnnotations[i]
+ if !_tgt.Equals(_src3) { return false }
+ }
+ if p.Debug != other.Debug { return false }
+ if p.Timestamp != other.Timestamp {
+ if p.Timestamp == nil || other.Timestamp == nil {
+ return false
+ }
+ if (*p.Timestamp) != (*other.Timestamp) { return false }
+ }
+ if p.Duration != other.Duration {
+ if p.Duration == nil || other.Duration == nil {
+ return false
+ }
+ if (*p.Duration) != (*other.Duration) { return false }
+ }
+ if p.TraceIDHigh != other.TraceIDHigh {
+ if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
+ return false
+ }
+ if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false }
+ }
+ return true
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type Response struct {
+ Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewResponse() *Response {
+ return &Response{}
+}
+
+
+func (p *Response) GetOk() bool {
+ return p.Ok
+}
+func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+ }
+ return nil
+}
+
+func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Ok = v
+}
+ return nil
+}
+
+func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+ return err
+}
+
+func (p *Response) Equals(other *Response) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ok != other.Ok { return false }
+ return true
+}
+
+func (p *Response) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Response(%+v)", *p)
+}
+
+type ZipkinCollector interface {
+ // Parameters:
+ // - Spans
+ SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
+}
+
+type ZipkinCollectorClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: c,
+ }
+}
+
+func (p *ZipkinCollectorClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
+ var _args4 ZipkinCollectorSubmitZipkinBatchArgs
+ _args4.Spans = spans
+ var _result6 ZipkinCollectorSubmitZipkinBatchResult
+ var _meta5 thrift.ResponseMeta
+ _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
+ p.SetLastResponseMeta_(_meta5)
+ if _err != nil {
+ return
+ }
+ return _result6.GetSuccess(), nil
+}
+
+type ZipkinCollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+ self7 := &ZipkinCollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+ self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler:handler}
+return self7
+}
+
+func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x8.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x8
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+ handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := ZipkinCollectorSubmitZipkinBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to do server side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
+
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ var retval []*Response
+ if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+ Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+ return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i ++ {
+ _elem9 := &Span{}
+ if err := _elem9.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+ }
+ p.Spans = append(p.Spans, _elem9)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+ Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+ return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+ return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Response, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i ++ {
+ _elem10 := &Response{}
+ if err := _elem10.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+ }
+ p.Success = append(p.Success, _elem10)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
+
+
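The generated Span above round-trips through any TProtocol via its Write and Read methods, and Equals compares the decoded values field by field. A minimal sketch of such a round trip over an in-memory transport — assuming the generated package is imported from github.com/uber/jaeger-client-go/thrift-gen/zipkincore and the vendored thrift package still ships TMemoryBuffer, neither of which is shown in this diff:

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	ctx := context.Background()

	// TMemoryBuffer keeps the serialized bytes in memory, so one protocol
	// instance can write the struct and immediately read it back.
	trans := thrift.NewTMemoryBuffer()
	prot := thrift.NewTBinaryProtocolConf(trans, nil)

	span := &zipkincore.Span{TraceID: 1, ID: 2, Name: "get /api", Debug: true}
	if err := span.Write(ctx, prot); err != nil {
		panic(err)
	}

	decoded := &zipkincore.Span{}
	if err := decoded.Read(ctx, prot); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Equals(span)) // expected: true
}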
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
index 1d8e642e0..c4c38ae01 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -1,7 +1,11 @@
# Apache Thrift
-This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c).
+This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
-It is vendored code to avoid compatibility issues introduced in Thrift v0.11.
+It is vendored code to avoid compatibility issues with Thrift versions.
-See https://github.com/jaegertracing/jaeger-client-go/pull/303.
+The file logger.go is modified to remove dependency on "testing" (see Issue #585).
+
+See:
+ * https://github.com/jaegertracing/jaeger-client-go/pull/584
+ * https://github.com/jaegertracing/jaeger-client-go/pull/303
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
index 6655cc5a9..32d5b0147 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -19,6 +19,10 @@
package thrift
+import (
+ "context"
+)
+
const (
UNKNOWN_APPLICATION_EXCEPTION = 0
UNKNOWN_METHOD = 1
@@ -28,14 +32,31 @@ const (
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
+ INVALID_TRANSFORM = 8
+ INVALID_PROTOCOL = 9
+ UNSUPPORTED_CLIENT_TYPE = 10
)
+var defaultApplicationExceptionMessage = map[int32]string{
+ UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
+ UNKNOWN_METHOD: "unknown method",
+ INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+ WRONG_METHOD_NAME: "wrong method name",
+ BAD_SEQUENCE_ID: "bad sequence ID",
+ MISSING_RESULT: "missing result",
+ INTERNAL_ERROR: "unknown internal error",
+ PROTOCOL_ERROR: "unknown protocol error",
+ INVALID_TRANSFORM: "Invalid transform",
+ INVALID_PROTOCOL: "Invalid protocol",
+ UNSUPPORTED_CLIENT_TYPE: "Unsupported client type",
+}
+
// Application level Thrift exception
type TApplicationException interface {
TException
TypeId() int32
- Read(iprot TProtocol) (TApplicationException, error)
- Write(oprot TProtocol) error
+ Read(ctx context.Context, iprot TProtocol) error
+ Write(ctx context.Context, oprot TProtocol) error
}
type tApplicationException struct {
@@ -43,8 +64,17 @@ type tApplicationException struct {
type_ int32
}
+var _ TApplicationException = (*tApplicationException)(nil)
+
+func (tApplicationException) TExceptionType() TExceptionType {
+ return TExceptionTypeApplication
+}
+
func (e tApplicationException) Error() string {
- return e.message
+ if e.message != "" {
+ return e.message
+ }
+ return defaultApplicationExceptionMessage[e.type_]
}
func NewTApplicationException(type_ int32, message string) TApplicationException {
@@ -55,19 +85,20 @@ func (p *tApplicationException) TypeId() int32 {
return p.type_
}
-func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
- _, err := iprot.ReadStructBegin()
+func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
+ // TODO: this should really be generated by the compiler
+ _, err := iprot.ReadStructBegin(ctx)
if err != nil {
- return nil, err
+ return err
}
message := ""
type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
for {
- _, ttype, id, err := iprot.ReadFieldBegin()
+ _, ttype, id, err := iprot.ReadFieldBegin(ctx)
if err != nil {
- return nil, err
+ return err
}
if ttype == STOP {
break
@@ -75,68 +106,75 @@ func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, er
switch id {
case 1:
if ttype == STRING {
- if message, err = iprot.ReadString(); err != nil {
- return nil, err
+ if message, err = iprot.ReadString(ctx); err != nil {
+ return err
}
} else {
- if err = SkipDefaultDepth(iprot, ttype); err != nil {
- return nil, err
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
}
}
case 2:
if ttype == I32 {
- if type_, err = iprot.ReadI32(); err != nil {
- return nil, err
+ if type_, err = iprot.ReadI32(ctx); err != nil {
+ return err
}
} else {
- if err = SkipDefaultDepth(iprot, ttype); err != nil {
- return nil, err
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
}
}
default:
- if err = SkipDefaultDepth(iprot, ttype); err != nil {
- return nil, err
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
}
}
- if err = iprot.ReadFieldEnd(); err != nil {
- return nil, err
+ if err = iprot.ReadFieldEnd(ctx); err != nil {
+ return err
}
}
- return NewTApplicationException(type_, message), iprot.ReadStructEnd()
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return err
+ }
+
+ p.message = message
+ p.type_ = type_
+
+ return nil
}
-func (p *tApplicationException) Write(oprot TProtocol) (err error) {
- err = oprot.WriteStructBegin("TApplicationException")
+func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
+ err = oprot.WriteStructBegin(ctx, "TApplicationException")
if len(p.Error()) > 0 {
- err = oprot.WriteFieldBegin("message", STRING, 1)
+ err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
if err != nil {
return
}
- err = oprot.WriteString(p.Error())
+ err = oprot.WriteString(ctx, p.Error())
if err != nil {
return
}
- err = oprot.WriteFieldEnd()
+ err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
}
- err = oprot.WriteFieldBegin("type", I32, 2)
+ err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
if err != nil {
return
}
- err = oprot.WriteI32(p.type_)
+ err = oprot.WriteI32(ctx, p.type_)
if err != nil {
return
}
- err = oprot.WriteFieldEnd()
+ err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
- err = oprot.WriteFieldStop()
+ err = oprot.WriteFieldStop(ctx)
if err != nil {
return
}
- err = oprot.WriteStructEnd()
+ err = oprot.WriteStructEnd(ctx)
return
}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
index 690d34111..45c880d32 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -21,6 +21,7 @@ package thrift
import (
"bytes"
+ "context"
"encoding/binary"
"errors"
"fmt"
@@ -31,190 +32,220 @@ import (
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
- reader io.Reader
- writer io.Writer
- strictRead bool
- strictWrite bool
+ cfg *TConfiguration
buffer [64]byte
}
type TBinaryProtocolFactory struct {
- strictRead bool
- strictWrite bool
+ cfg *TConfiguration
}
+// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
- return NewTBinaryProtocol(t, false, true)
+ return NewTBinaryProtocolConf(t, &TConfiguration{
+ noPropagation: true,
+ })
}
+// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
- p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+ return NewTBinaryProtocolConf(t, &TConfiguration{
+ TBinaryStrictRead: &strictRead,
+ TBinaryStrictWrite: &strictWrite,
+
+ noPropagation: true,
+ })
+}
+
+func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
+ PropagateTConfiguration(t, conf)
+ p := &TBinaryProtocol{
+ origTransport: t,
+ cfg: conf,
+ }
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
- p.reader = p.trans
- p.writer = p.trans
return p
}
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
- return NewTBinaryProtocolFactory(false, true)
+ return NewTBinaryProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
}
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
- return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+ return NewTBinaryProtocolFactoryConf(&TConfiguration{
+ TBinaryStrictRead: &strictRead,
+ TBinaryStrictWrite: &strictWrite,
+
+ noPropagation: true,
+ })
+}
+
+func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
+ return &TBinaryProtocolFactory{
+ cfg: conf,
+ }
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
- return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+ return NewTBinaryProtocolConf(t, p.cfg)
+}
+
+func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+ p.cfg = conf
}
/**
* Writing Methods
*/
-func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
- if p.strictWrite {
+func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+ if p.cfg.GetTBinaryStrictWrite() {
version := uint32(VERSION_1) | uint32(typeId)
- e := p.WriteI32(int32(version))
+ e := p.WriteI32(ctx, int32(version))
if e != nil {
return e
}
- e = p.WriteString(name)
+ e = p.WriteString(ctx, name)
if e != nil {
return e
}
- e = p.WriteI32(seqId)
+ e = p.WriteI32(ctx, seqId)
return e
} else {
- e := p.WriteString(name)
+ e := p.WriteString(ctx, name)
if e != nil {
return e
}
- e = p.WriteByte(int8(typeId))
+ e = p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
- e = p.WriteI32(seqId)
+ e = p.WriteI32(ctx, seqId)
return e
}
return nil
}
-func (p *TBinaryProtocol) WriteMessageEnd() error {
+func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
return nil
}
-func (p *TBinaryProtocol) WriteStructEnd() error {
+func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
- e := p.WriteByte(int8(typeId))
+func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+ e := p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
- e = p.WriteI16(id)
+ e = p.WriteI16(ctx, id)
return e
}
-func (p *TBinaryProtocol) WriteFieldEnd() error {
+func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteFieldStop() error {
- e := p.WriteByte(STOP)
+func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
+ e := p.WriteByte(ctx, STOP)
return e
}
-func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
- e := p.WriteByte(int8(keyType))
+func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ e := p.WriteByte(ctx, int8(keyType))
if e != nil {
return e
}
- e = p.WriteByte(int8(valueType))
+ e = p.WriteByte(ctx, int8(valueType))
if e != nil {
return e
}
- e = p.WriteI32(int32(size))
+ e = p.WriteI32(ctx, int32(size))
return e
}
-func (p *TBinaryProtocol) WriteMapEnd() error {
+func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
- e := p.WriteByte(int8(elemType))
+func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
- e = p.WriteI32(int32(size))
+ e = p.WriteI32(ctx, int32(size))
return e
}
-func (p *TBinaryProtocol) WriteListEnd() error {
+func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
- e := p.WriteByte(int8(elemType))
+func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
- e = p.WriteI32(int32(size))
+ e = p.WriteI32(ctx, int32(size))
return e
}
-func (p *TBinaryProtocol) WriteSetEnd() error {
+func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) WriteBool(value bool) error {
+func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
if value {
- return p.WriteByte(1)
+ return p.WriteByte(ctx, 1)
}
- return p.WriteByte(0)
+ return p.WriteByte(ctx, 0)
}
-func (p *TBinaryProtocol) WriteByte(value int8) error {
+func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
-func (p *TBinaryProtocol) WriteI16(value int16) error {
+func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
- _, e := p.writer.Write(v)
+ _, e := p.trans.Write(v)
return NewTProtocolException(e)
}
-func (p *TBinaryProtocol) WriteI32(value int32) error {
+func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
- _, e := p.writer.Write(v)
+ _, e := p.trans.Write(v)
return NewTProtocolException(e)
}
-func (p *TBinaryProtocol) WriteI64(value int64) error {
+func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
- _, err := p.writer.Write(v)
+ _, err := p.trans.Write(v)
return NewTProtocolException(err)
}
-func (p *TBinaryProtocol) WriteDouble(value float64) error {
- return p.WriteI64(int64(math.Float64bits(value)))
+func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
+ return p.WriteI64(ctx, int64(math.Float64bits(value)))
}
-func (p *TBinaryProtocol) WriteString(value string) error {
- e := p.WriteI32(int32(len(value)))
+func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
+ e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
@@ -222,12 +253,12 @@ func (p *TBinaryProtocol) WriteString(value string) error {
return NewTProtocolException(err)
}
-func (p *TBinaryProtocol) WriteBinary(value []byte) error {
- e := p.WriteI32(int32(len(value)))
+func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
+ e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
- _, err := p.writer.Write(value)
+ _, err := p.trans.Write(value)
return NewTProtocolException(err)
}
@@ -235,8 +266,8 @@ func (p *TBinaryProtocol) WriteBinary(value []byte) error {
* Reading methods
*/
-func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
- size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+ size, e := p.ReadI32(ctx)
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
@@ -246,79 +277,79 @@ func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType,
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
- name, e = p.ReadString()
+ name, e = p.ReadString(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
- seqId, e = p.ReadI32()
+ seqId, e = p.ReadI32(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
- if p.strictRead {
+ if p.cfg.GetTBinaryStrictRead() {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
- b, e3 := p.ReadByte()
+ b, e3 := p.ReadByte(ctx)
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
- seqId, e4 := p.ReadI32()
+ seqId, e4 := p.ReadI32(ctx)
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
-func (p *TBinaryProtocol) ReadMessageEnd() error {
+func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
return
}
-func (p *TBinaryProtocol) ReadStructEnd() error {
+func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
- t, err := p.ReadByte()
+func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
+ t, err := p.ReadByte(ctx)
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
- seqId, err = p.ReadI16()
+ seqId, err = p.ReadI16(ctx)
}
return name, typeId, seqId, err
}
-func (p *TBinaryProtocol) ReadFieldEnd() error {
+func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
-func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
- k, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
+ k, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
- v, e := p.ReadByte()
+ v, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
- size32, e := p.ReadI32()
+ size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
@@ -331,18 +362,18 @@ func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err erro
return kType, vType, size, nil
}
-func (p *TBinaryProtocol) ReadMapEnd() error {
+func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
- b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+ b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
- size32, e := p.ReadI32()
+ size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
@@ -356,18 +387,18 @@ func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error)
return
}
-func (p *TBinaryProtocol) ReadListEnd() error {
+func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
- b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
- size32, e := p.ReadI32()
+ size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
@@ -380,12 +411,12 @@ func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
return elemType, size, nil
}
-func (p *TBinaryProtocol) ReadSetEnd() error {
+func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
return nil
}
-func (p *TBinaryProtocol) ReadBool() (bool, error) {
- b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
+ b, e := p.ReadByte(ctx)
v := true
if b != 1 {
v = false
@@ -393,122 +424,132 @@ func (p *TBinaryProtocol) ReadBool() (bool, error) {
return v, e
}
-func (p *TBinaryProtocol) ReadByte() (int8, error) {
+func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
-func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
buf := p.buffer[0:2]
- err = p.readAll(buf)
+ err = p.readAll(ctx, buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
-func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
buf := p.buffer[0:4]
- err = p.readAll(buf)
+ err = p.readAll(ctx, buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
-func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
buf := p.buffer[0:8]
- err = p.readAll(buf)
+ err = p.readAll(ctx, buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
-func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
buf := p.buffer[0:8]
- err = p.readAll(buf)
+ err = p.readAll(ctx, buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
-func (p *TBinaryProtocol) ReadString() (value string, err error) {
- size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
+ size, e := p.ReadI32(ctx)
if e != nil {
return "", e
}
+ err = checkSizeForProtocol(size, p.cfg)
+ if err != nil {
+ return
+ }
if size < 0 {
err = invalidDataLength
return
}
+ if size == 0 {
+ return "", nil
+ }
+ if size < int32(len(p.buffer)) {
+ // Avoid allocation on small reads
+ buf := p.buffer[:size]
+ read, e := io.ReadFull(p.trans, buf)
+ return string(buf[:read]), NewTProtocolException(e)
+ }
return p.readStringBody(size)
}
-func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
- size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+ size, e := p.ReadI32(ctx)
if e != nil {
return nil, e
}
- if size < 0 {
- return nil, invalidDataLength
- }
- if uint64(size) > p.trans.RemainingBytes() {
- return nil, invalidDataLength
+ if err := checkSizeForProtocol(size, p.cfg); err != nil {
+ return nil, err
}
- isize := int(size)
- buf := make([]byte, isize)
- _, err := io.ReadFull(p.trans, buf)
+ buf, err := safeReadBytes(size, p.trans)
return buf, NewTProtocolException(err)
}
-func (p *TBinaryProtocol) Flush() (err error) {
- return NewTProtocolException(p.trans.Flush())
+func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
+ return NewTProtocolException(p.trans.Flush(ctx))
}
-func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
- return SkipDefaultDepth(p, fieldType)
+func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
-func (p *TBinaryProtocol) readAll(buf []byte) error {
- _, err := io.ReadFull(p.reader, buf)
+func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
+ var read int
+ _, deadlineSet := ctx.Deadline()
+ for {
+ read, err = io.ReadFull(p.trans, buf)
+ if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
+ // This is I/O timeout without anything read,
+ // and we still have time left, keep retrying.
+ continue
+ }
+ // For anything else, don't retry
+ break
+ }
return NewTProtocolException(err)
}
-const readLimit = 32768
-
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
- if size < 0 {
- return "", nil
- }
- if uint64(size) > p.trans.RemainingBytes() {
- return "", invalidDataLength
- }
+ buf, err := safeReadBytes(size, p.trans)
+ return string(buf), NewTProtocolException(err)
+}
- var (
- buf bytes.Buffer
- e error
- b []byte
- )
+func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+ PropagateTConfiguration(p.origTransport, conf)
+ p.cfg = conf
+}
- switch {
- case int(size) <= len(p.buffer):
- b = p.buffer[:size] // avoids allocation for small reads
- case int(size) < readLimit:
- b = make([]byte, size)
- default:
- b = make([]byte, readLimit)
- }
+var (
+ _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
+ _ TConfigurationSetter = (*TBinaryProtocol)(nil)
+)
- for size > 0 {
- _, e = io.ReadFull(p.trans, b)
- buf.Write(b)
- if e != nil {
- break
- }
- size -= readLimit
- if size < readLimit && size > 0 {
- b = b[:size]
- }
+// This function is shared between TBinaryProtocol and TCompactProtocol.
+//
+// It tries to read size bytes from trans, in a way that prevents large
+// allocations when size is insanely large (mostly caused by malformed message).
+func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
+ if size < 0 {
+ return nil, nil
}
- return buf.String(), NewTProtocolException(e)
+
+ buf := new(bytes.Buffer)
+ _, err := io.CopyN(buf, trans, int64(size))
+ return buf.Bytes(), err
}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
new file mode 100644
index 000000000..ea2c01fda
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
@@ -0,0 +1,109 @@
+package thrift
+
+import (
+ "context"
+ "fmt"
+)
+
+// ResponseMeta represents the metadata attached to the response.
+type ResponseMeta struct {
+ // The headers in the response, if any.
+ // If the underlying transport/protocol is not THeader, this will always be nil.
+ Headers THeaderMap
+}
+
+type TClient interface {
+ Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+type TStandardClient struct {
+ seqId int32
+ iprot, oprot TProtocol
+}
+
+// TStandardClient implements TClient, and uses the standard message format for Thrift.
+// It is not safe for concurrent use.
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+ return &TStandardClient{
+ iprot: inputProtocol,
+ oprot: outputProtocol,
+ }
+}
+
+func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
+ // Set headers from context object on THeaderProtocol
+ if headerProt, ok := oprot.(*THeaderProtocol); ok {
+ headerProt.ClearWriteHeaders()
+ for _, key := range GetWriteHeaderList(ctx) {
+ if value, ok := GetHeader(ctx, key); ok {
+ headerProt.SetWriteHeader(key, value)
+ }
+ }
+ }
+
+ if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
+ return err
+ }
+ if err := args.Write(ctx, oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteMessageEnd(ctx); err != nil {
+ return err
+ }
+ return oprot.Flush(ctx)
+}
+
+func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
+ rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
+ if err != nil {
+ return err
+ }
+
+ if method != rMethod {
+ return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+ } else if seqId != rSeqId {
+ return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+ } else if rTypeId == EXCEPTION {
+ var exception tApplicationException
+ if err := exception.Read(ctx, iprot); err != nil {
+ return err
+ }
+
+ if err := iprot.ReadMessageEnd(ctx); err != nil {
+ return err
+ }
+
+ return &exception
+ } else if rTypeId != REPLY {
+ return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+ }
+
+ if err := result.Read(ctx, iprot); err != nil {
+ return err
+ }
+
+ return iprot.ReadMessageEnd(ctx)
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+ p.seqId++
+ seqId := p.seqId
+
+ if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
+ return ResponseMeta{}, err
+ }
+
+ // method is oneway
+ if result == nil {
+ return ResponseMeta{}, nil
+ }
+
+ err := p.Recv(ctx, p.iprot, seqId, method, result)
+ var headers THeaderMap
+ if hp, ok := p.iprot.(*THeaderProtocol); ok {
+ headers = hp.transport.readHeaders
+ }
+ return ResponseMeta{
+ Headers: headers,
+ }, err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
index b9299f2fa..a49225dab 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -20,7 +20,9 @@
package thrift
import (
+ "context"
"encoding/binary"
+ "errors"
"fmt"
"io"
"math"
@@ -73,20 +75,37 @@ func init() {
}
}
-type TCompactProtocolFactory struct{}
+type TCompactProtocolFactory struct {
+ cfg *TConfiguration
+}
+// Deprecated: Use NewTCompactProtocolFactoryConf instead.
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
- return &TCompactProtocolFactory{}
+ return NewTCompactProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
+}
+
+func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
+ return &TCompactProtocolFactory{
+ cfg: conf,
+ }
}
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTCompactProtocol(trans)
+ return NewTCompactProtocolConf(trans, p.cfg)
+}
+
+func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+ p.cfg = conf
}
type TCompactProtocol struct {
trans TRichTransport
origTransport TTransport
+ cfg *TConfiguration
+
// Used to keep track of the last field for the current and previous structs,
// so we can do the delta stuff.
lastField []int
@@ -105,9 +124,19 @@ type TCompactProtocol struct {
buffer [64]byte
}
-// Create a TCompactProtocol given a TTransport
+// Deprecated: Use NewTCompactProtocolConf instead.
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
- p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+ return NewTCompactProtocolConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
+ PropagateTConfiguration(trans, conf)
+ p := &TCompactProtocol{
+ origTransport: trans,
+ cfg: conf,
+ }
if et, ok := trans.(TRichTransport); ok {
p.trans = et
} else {
@@ -115,7 +144,6 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
}
return p
-
}
//
@@ -124,7 +152,7 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
// Write a message header to the wire. Compact Protocol messages contain the
// protocol version so we can migrate forwards in the future if need be.
-func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
if err != nil {
return NewTProtocolException(err)
@@ -137,17 +165,17 @@ func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, s
if err != nil {
return NewTProtocolException(err)
}
- e := p.WriteString(name)
+ e := p.WriteString(ctx, name)
return e
}
-func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
// Write a struct begin. This doesn't actually put anything on the wire. We
// use it as an opportunity to put special placeholder markers on the field
// stack so we can get the field id deltas correct.
-func (p *TCompactProtocol) WriteStructBegin(name string) error {
+func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return nil
@@ -156,26 +184,29 @@ func (p *TCompactProtocol) WriteStructBegin(name string) error {
// Write a struct end. This doesn't actually put anything on the wire. We use
// this as an opportunity to pop the last field from the current struct off
// of the field stack.
-func (p *TCompactProtocol) WriteStructEnd() error {
+func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
+ if len(p.lastField) <= 0 {
+ return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
+ }
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
-func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
if typeId == BOOL {
// we want to possibly include the value, so we'll wait.
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
return nil
}
- _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+ _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
return NewTProtocolException(err)
}
// The workhorse of writeFieldBegin. It has the option of doing a
// 'type override' of the type header. This is used specifically in the
// boolean field case.
-func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
// short lastField = lastField_.pop();
// if there's a type override, use that.
@@ -200,7 +231,7 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id
if err != nil {
return 0, err
}
- err = p.WriteI16(id)
+ err = p.WriteI16(ctx, id)
written = 1 + 2
if err != nil {
return 0, err
@@ -208,18 +239,17 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id
}
p.lastFieldId = fieldId
- // p.lastField.Push(field.id);
return written, nil
}
-func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
-func (p *TCompactProtocol) WriteFieldStop() error {
+func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
err := p.writeByteDirect(STOP)
return NewTProtocolException(err)
}
-func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
if size == 0 {
err := p.writeByteDirect(0)
return NewTProtocolException(err)
@@ -232,32 +262,32 @@ func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size in
return NewTProtocolException(err)
}
-func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
// Write a list header.
-func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
-func (p *TCompactProtocol) WriteListEnd() error { return nil }
+func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
// Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
-func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
-func (p *TCompactProtocol) WriteBool(value bool) error {
+func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
v := byte(COMPACT_BOOLEAN_FALSE)
if value {
v = byte(COMPACT_BOOLEAN_TRUE)
}
if p.booleanFieldPending {
// we haven't written the field header yet
- _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+ _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
p.booleanFieldPending = false
return NewTProtocolException(err)
}
@@ -267,31 +297,31 @@ func (p *TCompactProtocol) WriteBool(value bool) error {
}
// Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(value int8) error {
+func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
err := p.writeByteDirect(byte(value))
return NewTProtocolException(err)
}
// Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(value int16) error {
+func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
return NewTProtocolException(err)
}
// Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(value int32) error {
+func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
_, err := p.writeVarint32(p.int32ToZigzag(value))
return NewTProtocolException(err)
}
// Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(value int64) error {
+func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
_, err := p.writeVarint64(p.int64ToZigzag(value))
return NewTProtocolException(err)
}
// Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(value float64) error {
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
buf := p.buffer[0:8]
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
_, err := p.trans.Write(buf)
@@ -299,7 +329,7 @@ func (p *TCompactProtocol) WriteDouble(value float64) error {
}
// Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(value string) error {
+func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
_, e := p.writeVarint32(int32(len(value)))
if e != nil {
return NewTProtocolException(e)
@@ -311,7 +341,7 @@ func (p *TCompactProtocol) WriteString(value string) error {
}
// Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
_, e := p.writeVarint32(int32(len(bin)))
if e != nil {
return NewTProtocolException(e)
@@ -328,9 +358,20 @@ func (p *TCompactProtocol) WriteBinary(bin []byte) error {
//
// Read a message header.
-func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+ var protocolId byte
- protocolId, err := p.readByteDirect()
+ _, deadlineSet := ctx.Deadline()
+ for {
+ protocolId, err = p.readByteDirect()
+ if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+ // keep retrying I/O timeout errors since we still have
+ // time left
+ continue
+ }
+ // For anything else, don't retry
+ break
+ }
if err != nil {
return
}
@@ -357,15 +398,15 @@ func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType,
err = NewTProtocolException(e)
return
}
- name, err = p.ReadString()
+ name, err = p.ReadString(ctx)
return
}
-func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
// Read a struct begin. There's nothing on the wire for this, but it is our
// opportunity to push a new struct begin marker onto the field stack.
-func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return
@@ -373,15 +414,18 @@ func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
// Doesn't actually consume any wire data, just removes the last field for
// this struct from the field stack.
-func (p *TCompactProtocol) ReadStructEnd() error {
+func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
// consume the last field we read off the wire.
+ if len(p.lastField) <= 0 {
+ return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
+ }
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
// Read a field header off the wire.
-func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
t, err := p.readByteDirect()
if err != nil {
return
@@ -396,7 +440,7 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16
modifier := int16((t & 0xf0) >> 4)
if modifier == 0 {
// not a delta. look ahead for the zigzag varint field id.
- id, err = p.ReadI16()
+ id, err = p.ReadI16(ctx)
if err != nil {
return
}
@@ -422,12 +466,12 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16
return
}
-func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
// Read a map header off the wire. If the size is zero, skip reading the key
// and value type. This means that 0-length maps will yield TMaps without the
// "correct" types.
-func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
size32, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
@@ -451,13 +495,13 @@ func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size
return
}
-func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
// Read a list header off the wire. If the list size is 0-14, the size will
// be packed into the element type header. If it's a longer list, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
-func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
size_and_type, err := p.readByteDirect()
if err != nil {
return
@@ -483,22 +527,22 @@ func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error)
return
}
-func (p *TCompactProtocol) ReadListEnd() error { return nil }
+func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
// Read a set header off the wire. If the set size is 0-14, the size will
// be packed into the element type header. If it's a longer set, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
-func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
- return p.ReadListBegin()
+func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.ReadListBegin(ctx)
}
-func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
// Read a boolean off the wire. If this is a boolean field, the value should
// already have been read during readFieldBegin, so we'll just consume the
// pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
if p.boolValueIsNotNull {
p.boolValueIsNotNull = false
return p.boolValue, nil
@@ -508,7 +552,7 @@ func (p *TCompactProtocol) ReadBool() (value bool, err error) {
}
// Read a single byte off the wire. Nothing interesting here.
-func (p *TCompactProtocol) ReadByte() (int8, error) {
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.readByteDirect()
if err != nil {
return 0, NewTProtocolException(err)
@@ -517,13 +561,13 @@ func (p *TCompactProtocol) ReadByte() (int8, error) {
}
// Read an i16 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI16() (value int16, err error) {
- v, err := p.ReadI32()
+func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+ v, err := p.ReadI32(ctx)
return int16(v), err
}
// Read an i32 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
v, e := p.readVarint32()
if e != nil {
return 0, NewTProtocolException(e)
@@ -533,7 +577,7 @@ func (p *TCompactProtocol) ReadI32() (value int32, err error) {
}
// Read an i64 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
v, e := p.readVarint64()
if e != nil {
return 0, NewTProtocolException(e)
@@ -543,7 +587,7 @@ func (p *TCompactProtocol) ReadI64() (value int64, err error) {
}
// No magic here - just read a double off the wire.
-func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
longBits := p.buffer[0:8]
_, e := io.ReadFull(p.trans, longBits)
if e != nil {
@@ -553,58 +597,53 @@ func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
}
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
-func (p *TCompactProtocol) ReadString() (value string, err error) {
+func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
length, e := p.readVarint32()
if e != nil {
return "", NewTProtocolException(e)
}
- if length < 0 {
- return "", invalidDataLength
- }
- if uint64(length) > p.trans.RemainingBytes() {
- return "", invalidDataLength
+ err = checkSizeForProtocol(length, p.cfg)
+ if err != nil {
+ return
}
-
if length == 0 {
return "", nil
}
- var buf []byte
- if length <= int32(len(p.buffer)) {
- buf = p.buffer[0:length]
- } else {
- buf = make([]byte, length)
+ if length < int32(len(p.buffer)) {
+ // Avoid allocation on small reads
+ buf := p.buffer[:length]
+ read, e := io.ReadFull(p.trans, buf)
+ return string(buf[:read]), NewTProtocolException(e)
}
- _, e = io.ReadFull(p.trans, buf)
+
+ buf, e := safeReadBytes(length, p.trans)
return string(buf), NewTProtocolException(e)
}
// Read a []byte from the wire.
-func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
length, e := p.readVarint32()
if e != nil {
return nil, NewTProtocolException(e)
}
+ err = checkSizeForProtocol(length, p.cfg)
+ if err != nil {
+ return
+ }
if length == 0 {
return []byte{}, nil
}
- if length < 0 {
- return nil, invalidDataLength
- }
- if uint64(length) > p.trans.RemainingBytes() {
- return nil, invalidDataLength
- }
- buf := make([]byte, length)
- _, e = io.ReadFull(p.trans, buf)
+ buf, e := safeReadBytes(length, p.trans)
return buf, NewTProtocolException(e)
}
-func (p *TCompactProtocol) Flush() (err error) {
- return NewTProtocolException(p.trans.Flush())
+func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
+ return NewTProtocolException(p.trans.Flush(ctx))
}
-func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
- return SkipDefaultDepth(p, fieldType)
+func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TCompactProtocol) Transport() TTransport {
@@ -806,10 +845,21 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
case COMPACT_STRUCT:
return STRUCT, nil
}
- return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+ return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
}
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
return ttypeToCompactType[t]
}
+
+func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+ PropagateTConfiguration(p.origTransport, conf)
+ p.cfg = conf
+}
+
+var (
+ _ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
+ _ TConfigurationSetter = (*TCompactProtocol)(nil)
+)
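For reference, a minimal sketch of driving the conf-aware compact protocol constructor introduced above; the size limit, the in-memory transport, and the payload are illustrative assumptions rather than part of this change:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	// Illustrative limit only; any non-default TConfiguration field is handled the same way.
    	conf := &thrift.TConfiguration{
    		MaxMessageSize: 1024 * 1024, // 1 MiB
    	}

    	// An in-memory transport keeps the sketch self-contained.
    	trans := thrift.NewTMemoryBuffer()
    	proto := thrift.NewTCompactProtocolConf(trans, conf)

    	// The context is now threaded through every read/write call.
    	ctx := context.Background()
    	if err := proto.WriteString(ctx, "hello"); err != nil {
    		panic(err)
    	}
    	s, err := proto.ReadString(ctx)
    	fmt.Println(s, err)
    }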
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
new file mode 100644
index 000000000..454d9f377
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "crypto/tls"
+ "fmt"
+ "time"
+)
+
+// Default TConfiguration values.
+const (
+ DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
+ DEFAULT_MAX_FRAME_SIZE = 16384000
+
+ DEFAULT_TBINARY_STRICT_READ = false
+ DEFAULT_TBINARY_STRICT_WRITE = true
+
+ DEFAULT_CONNECT_TIMEOUT = 0
+ DEFAULT_SOCKET_TIMEOUT = 0
+)
+
+// TConfiguration defines some configurations shared between TTransport,
+// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
+//
+// When constructing TConfiguration, you only need to specify the non-default
+// fields. All zero values have sane default values.
+//
+// Not all configurations defined are applicable to all implementations.
+// Implementations are free to ignore the configurations not applicable to them.
+//
+// All functions attached to this type are nil-safe.
+//
+// See [1] for spec.
+//
+// NOTE: When using TConfiguration, fill in all the configurations you want to
+// set across the stack, not only the ones you want to set in the immediate
+// TTransport/TProtocol.
+//
+// For example, say you want to migrate this old code into using TConfiguration:
+//
+// socket := thrift.NewTSocketTimeout("host:port", time.Second)
+// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+// thrift.NewTTransportFactory(),
+// 1024 * 1024 * 256,
+// )
+// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//
+// This is the wrong way to do it because in the end the TConfiguration used by
+// socket and transFactory will be overwritten by the one used by protoFactory
+// because of TConfiguration propagation:
+//
+// // bad example, DO NOT USE
+// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+// ConnectTimeout: time.Second,
+// SocketTimeout: time.Second,
+// })
+// transFactory := thrift.NewTFramedTransportFactoryConf(
+// thrift.NewTTransportFactory(),
+// &thrift.TConfiguration{
+// MaxFrameSize: 1024 * 1024 * 256,
+// },
+// )
+// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+// TBinaryStrictRead: thrift.BoolPtr(true),
+// TBinaryStrictWrite: thrift.BoolPtr(true),
+// })
+//
+// This is the correct way to do it:
+//
+// conf := &thrift.TConfiguration{
+// ConnectTimeout: time.Second,
+// SocketTimeout: time.Second,
+//
+// MaxFrameSize: 1024 * 1024 * 256,
+//
+// TBinaryStrictRead: thrift.BoolPtr(true),
+// TBinaryStrictWrite: thrift.BoolPtr(true),
+// }
+// socket := thrift.NewTSocketConf("host:port", conf)
+// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//
+// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
+type TConfiguration struct {
+ // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
+ MaxMessageSize int32
+
+ // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
+ //
+ // Also if MaxMessageSize < MaxFrameSize,
+ // MaxMessageSize will be used instead.
+ MaxFrameSize int32
+
+ // Connect and socket timeouts to be used by TSocket and TSSLSocket.
+ //
+ // 0 means no timeout.
+ //
+ // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
+ // used.
+ ConnectTimeout time.Duration
+ SocketTimeout time.Duration
+
+ // TLS config to be used by TSSLSocket.
+ TLSConfig *tls.Config
+
+ // Strict read/write configurations for TBinaryProtocol.
+ //
+ // BoolPtr helper function is available to use literal values.
+ TBinaryStrictRead *bool
+ TBinaryStrictWrite *bool
+
+ // The wrapped protocol id to be used in THeader transport/protocol.
+ //
+ // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
+ // are provided to help filling this value.
+ THeaderProtocolID *THeaderProtocolID
+
+ // Used internally by deprecated constructors, to avoid overriding
+ // underlying TTransport/TProtocol's cfg by accidental propagations.
+ //
+ // For external users this is always false.
+ noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+ if tc == nil || tc.MaxMessageSize <= 0 {
+ return DEFAULT_MAX_MESSAGE_SIZE
+ }
+ return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+ if tc == nil {
+ return DEFAULT_MAX_FRAME_SIZE
+ }
+ maxFrameSize := tc.MaxFrameSize
+ if maxFrameSize <= 0 {
+ maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+ }
+ if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+ return maxMessageSize
+ }
+ return maxFrameSize
+}
+
+// GetConnectTimeout returns the connect timeout that should be used by TSocket and
+// TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+ if tc == nil || tc.ConnectTimeout < 0 {
+ return DEFAULT_CONNECT_TIMEOUT
+ }
+ return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by TSocket and
+// TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+ if tc == nil || tc.SocketTimeout < 0 {
+ return DEFAULT_SOCKET_TIMEOUT
+ }
+ return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+ if tc == nil {
+ return nil
+ }
+ return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+ if tc == nil || tc.TBinaryStrictRead == nil {
+ return DEFAULT_TBINARY_STRICT_READ
+ }
+ return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+ if tc == nil || tc.TBinaryStrictWrite == nil {
+ return DEFAULT_TBINARY_STRICT_WRITE
+ }
+ return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (for servers, they always use the same one as the
+// client instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+ if tc == nil || tc.THeaderProtocolID == nil {
+ return THeaderProtocolDefault
+ }
+ protoID := *tc.THeaderProtocolID
+ if err := protoID.Validate(); err != nil {
+ return THeaderProtocolDefault
+ }
+ return protoID
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+ err := id.Validate()
+ if err != nil {
+ id = THeaderProtocolDefault
+ }
+ return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+ ptr, err := THeaderProtocolIDPtr(id)
+ if err != nil {
+ panic(err)
+ }
+ return ptr
+}
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initialization.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the current processing message,
+// and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick successions due to the implementation of the
+// propagation. Implementations should make SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+ SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with all default values, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
+ if cfg == nil || cfg.noPropagation {
+ return
+ }
+
+ if setter, ok := impl.(TConfigurationSetter); ok {
+ setter.SetTConfiguration(cfg)
+ }
+}
+
+func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
+ if size < 0 {
+ return NewTProtocolExceptionWithType(
+ NEGATIVE_SIZE,
+ fmt.Errorf("negative size: %d", size),
+ )
+ }
+ if size > cfg.GetMaxMessageSize() {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ fmt.Errorf("size exceeded max allowed: %d", size),
+ )
+ }
+ return nil
+}
+
+type tTransportFactoryConf struct {
+ delegate TTransportFactory
+ cfg *TConfiguration
+}
+
+func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
+ trans, err := f.delegate.GetTransport(orig)
+ if err == nil {
+ PropagateTConfiguration(orig, f.cfg)
+ PropagateTConfiguration(trans, f.cfg)
+ }
+ return trans, err
+}
+
+func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.delegate, f.cfg)
+ f.cfg = cfg
+}
+
+// TTransportFactoryConf wraps a TTransportFactory to propagate
+// TConfiguration on the factory's GetTransport calls.
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
+ return &tTransportFactoryConf{
+ delegate: delegate,
+ cfg: conf,
+ }
+}
+
+type tProtocolFactoryConf struct {
+ delegate TProtocolFactory
+ cfg *TConfiguration
+}
+
+func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
+ proto := f.delegate.GetProtocol(trans)
+ PropagateTConfiguration(trans, f.cfg)
+ PropagateTConfiguration(proto, f.cfg)
+ return proto
+}
+
+func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.delegate, f.cfg)
+ f.cfg = cfg
+}
+
+// TProtocolFactoryConf wraps a TProtocolFactory to propagate
+// TConfiguration on the factory's GetProtocol calls.
+func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
+ return &tProtocolFactoryConf{
+ delegate: delegate,
+ cfg: conf,
+ }
+}
+
+var (
+ _ TConfigurationSetter = (*tTransportFactoryConf)(nil)
+ _ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
+)
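A short sketch of attaching one shared TConfiguration to existing factories through the wrappers defined above; the size limits and the choice of header protocol factory are illustrative assumptions. Because propagation overwrites whatever configuration a wrapped transport/protocol had before, sharing a single conf value keeps the whole stack consistent:

    package main

    import (
    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	// Shared configuration; the limits are illustrative values.
    	conf := &thrift.TConfiguration{
    		MaxFrameSize:   1024 * 1024,
    		MaxMessageSize: 10 * 1024 * 1024,
    	}

    	// Wrapping existing factories propagates conf to every transport and
    	// protocol they hand out, without modifying the factories themselves.
    	transFactory := thrift.TTransportFactoryConf(thrift.NewTTransportFactory(), conf)
    	protoFactory := thrift.TProtocolFactoryConf(thrift.NewTHeaderProtocolFactoryConf(conf), conf)

    	_ = transFactory
    	_ = protoFactory
    }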
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
index ca0d3faf2..d15c1bcf8 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
@@ -19,12 +19,6 @@
package thrift
-// A processor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
- Process(in, out TProtocol) (bool, TException)
-}
+import "context"
-type TProcessorFunction interface {
- Process(seqId int32, in, out TProtocol) (bool, TException)
-}
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
index ea8d6f661..53bf862ea 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -26,19 +26,91 @@ import (
// Generic Thrift exception
type TException interface {
error
+
+ TExceptionType() TExceptionType
}
// Prepends additional information to an error without losing the Thrift exception interface
func PrependError(prepend string, err error) error {
- if t, ok := err.(TTransportException); ok {
- return NewTTransportException(t.TypeId(), prepend+t.Error())
+ msg := prepend + err.Error()
+
+ var te TException
+ if errors.As(err, &te) {
+ switch te.TExceptionType() {
+ case TExceptionTypeTransport:
+ if t, ok := err.(TTransportException); ok {
+ return prependTTransportException(prepend, t)
+ }
+ case TExceptionTypeProtocol:
+ if t, ok := err.(TProtocolException); ok {
+ return prependTProtocolException(prepend, t)
+ }
+ case TExceptionTypeApplication:
+ var t TApplicationException
+ if errors.As(err, &t) {
+ return NewTApplicationException(t.TypeId(), msg)
+ }
+ }
+
+ return wrappedTException{
+ err: err,
+ msg: msg,
+ tExceptionType: te.TExceptionType(),
+ }
+ }
+
+ return errors.New(msg)
+}
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+ TExceptionTypeUnknown TExceptionType = iota
+ TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
+ TExceptionTypeApplication // TApplicationExceptions
+ TExceptionTypeProtocol // TProtocolExceptions
+ TExceptionTypeTransport // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already a TException, it's returned as-is.
+// Otherwise it will be wrapped into a TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+ if err == nil {
+ return nil
}
- if t, ok := err.(TProtocolException); ok {
- return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
+
+ if te, ok := err.(TException); ok {
+ return te
}
- if t, ok := err.(TApplicationException); ok {
- return NewTApplicationException(t.TypeId(), prepend+t.Error())
+
+ return wrappedTException{
+ err: err,
+ msg: err.Error(),
+ tExceptionType: TExceptionTypeUnknown,
}
+}
+
+type wrappedTException struct {
+ err error
+ msg string
+ tExceptionType TExceptionType
+}
- return errors.New(prepend + err.Error())
+func (w wrappedTException) Error() string {
+ return w.msg
}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+ return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+ return w.err
+}
+
+var _ TException = wrappedTException{}
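A small sketch of how the new TException wrapping behaves; the underlying error and the prepended message are illustrative assumptions:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	// A plain error wrapped so that TException-aware code can categorize it.
    	base := errors.New("connection reset") // illustrative error
    	te := thrift.WrapTException(base)
    	fmt.Println(te.TExceptionType() == thrift.TExceptionTypeUnknown) // true

    	// The original error stays reachable through errors.Is/errors.As.
    	fmt.Println(errors.Is(te, base)) // true

    	// PrependError keeps the TException categorization while adding context.
    	wrapped := thrift.PrependError("while reading frame: ", te)
    	var asTE thrift.TException
    	fmt.Println(errors.As(wrapped, &asTE)) // true
    }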
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
new file mode 100644
index 000000000..ac9bd4882
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type (
+ headerKey string
+ headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+ headerKeyListRead headerKeyList = iota
+ headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKey(key),
+ value,
+ )
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKey(key),
+ nil,
+ )
+}
+
+// GetHeader returns a value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+ if v := ctx.Value(headerKey(key)); v != nil {
+ value, ok = v.(string)
+ }
+ return
+}
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKeyListRead,
+ keys,
+ )
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+ if v := ctx.Value(headerKeyListRead); v != nil {
+ if value, ok := v.([]string); ok {
+ return value
+ }
+ }
+ return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKeyListWrite,
+ keys,
+ )
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+ if v := ctx.Value(headerKeyListWrite); v != nil {
+ if value, ok := v.([]string); ok {
+ return value
+ }
+ }
+ return nil
+}
+
+// AddReadTHeaderToContext adds all of the THeader headers into the context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+ keys := make([]string, 0, len(headers))
+ for key, value := range headers {
+ ctx = SetHeader(ctx, key, value)
+ keys = append(keys, key)
+ }
+ return SetReadHeaderList(ctx, keys)
+}
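A brief sketch of the header context helpers above; the "trace-id" key and its value are illustrative assumptions:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	ctx := context.Background()

    	// "trace-id" is only an illustrative header key.
    	ctx = thrift.SetHeader(ctx, "trace-id", "abc123")
    	ctx = thrift.SetWriteHeaderList(ctx, []string{"trace-id"})

    	// A THeader-aware client or processor can later read them back.
    	if v, ok := thrift.GetHeader(ctx, "trace-id"); ok {
    		fmt.Println("trace-id =", v)
    	}
    	fmt.Println(thrift.GetWriteHeaderList(ctx)) // [trace-id]
    }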
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
new file mode 100644
index 000000000..878041f8d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+ transport *THeaderTransport
+
+ // Will be initialized on first read/write.
+ protocol TProtocol
+
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+ return newTHeaderProtocolConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with given TConfiguration.
+//
+// The passed in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles frame and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// instead of rich transports like TZlibTransport or TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+ return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+ t := NewTHeaderTransportConf(trans, cfg)
+ p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+ PropagateTConfiguration(p, cfg)
+ return &THeaderProtocol{
+ transport: t,
+ protocol: p,
+ cfg: cfg,
+ }
+}
+
+type tHeaderProtocolFactory struct {
+ cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+ f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+ return NewTHeaderProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+ return tHeaderProtocolFactory{
+ cfg: conf,
+ }
+}
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+ return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+ return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+ p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+ p.transport.ClearWriteHeaders()
+}
+
+// AddTransform adds a transform for writing.
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
+ return p.transport.AddTransform(transform)
+}
+
+func (p *THeaderProtocol) Flush(ctx context.Context) error {
+ return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
+ newProto, err := p.transport.Protocol().GetProtocol(p.transport)
+ if err != nil {
+ return err
+ }
+ PropagateTConfiguration(newProto, p.cfg)
+ p.protocol = newProto
+ p.transport.SequenceID = seqID
+ return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
+}
+
+func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
+ if err := p.protocol.WriteMessageEnd(ctx); err != nil {
+ return err
+ }
+ return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
+ return p.protocol.WriteStructBegin(ctx, name)
+}
+
+func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
+ return p.protocol.WriteStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
+ return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
+}
+
+func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
+ return p.protocol.WriteFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
+ return p.protocol.WriteFieldStop(ctx)
+}
+
+func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
+}
+
+func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
+ return p.protocol.WriteMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ return p.protocol.WriteListBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
+ return p.protocol.WriteListEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ return p.protocol.WriteSetBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
+ return p.protocol.WriteSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
+ return p.protocol.WriteBool(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
+ return p.protocol.WriteByte(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
+ return p.protocol.WriteI16(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
+ return p.protocol.WriteI32(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
+ return p.protocol.WriteI64(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
+ return p.protocol.WriteDouble(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
+ return p.protocol.WriteString(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
+ return p.protocol.WriteBinary(ctx, value)
+}
+
+// ReadFrame calls the underlying THeaderTransport's ReadFrame function.
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
+ return p.transport.ReadFrame(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
+ if err = p.transport.ReadFrame(ctx); err != nil {
+ return
+ }
+
+ var newProto TProtocol
+ newProto, err = p.transport.Protocol().GetProtocol(p.transport)
+ if err != nil {
+ var tAppExc TApplicationException
+ if !errors.As(err, &tAppExc) {
+ return
+ }
+ if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
+ return
+ }
+ if e := tAppExc.Write(ctx, p.protocol); e != nil {
+ return
+ }
+ if e := p.protocol.WriteMessageEnd(ctx); e != nil {
+ return
+ }
+ if e := p.transport.Flush(ctx); e != nil {
+ return
+ }
+ return
+ }
+ PropagateTConfiguration(newProto, p.cfg)
+ p.protocol = newProto
+
+ return p.protocol.ReadMessageBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
+ return p.protocol.ReadMessageEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+ return p.protocol.ReadStructBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
+ return p.protocol.ReadStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
+ return p.protocol.ReadFieldBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
+ return p.protocol.ReadFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+ return p.protocol.ReadMapBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
+ return p.protocol.ReadMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.protocol.ReadListBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
+ return p.protocol.ReadListEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.protocol.ReadSetBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
+ return p.protocol.ReadSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+ return p.protocol.ReadBool(ctx)
+}
+
+func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+ return p.protocol.ReadByte(ctx)
+}
+
+func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+ return p.protocol.ReadI16(ctx)
+}
+
+func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+ return p.protocol.ReadI32(ctx)
+}
+
+func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+ return p.protocol.ReadI64(ctx)
+}
+
+func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+ return p.protocol.ReadDouble(ctx)
+}
+
+func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
+ return p.protocol.ReadString(ctx)
+}
+
+func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+ return p.protocol.ReadBinary(ctx)
+}
+
+func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
+ return p.protocol.Skip(ctx, fieldType)
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(p.transport, cfg)
+ PropagateTConfiguration(p.protocol, cfg)
+ p.cfg = cfg
+}
+
+var (
+ _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
+ _ TConfigurationSetter = (*THeaderProtocol)(nil)
+)
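A minimal sketch of constructing a THeaderProtocol with an explicit wrapped-protocol id; the memory-buffer transport is an illustrative stand-in for the raw socket transport a real client would use:

    package main

    import (
    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	// Select compact as the wrapped protocol via the helper defined in configuration.go.
    	conf := &thrift.TConfiguration{
    		THeaderProtocolID: thrift.THeaderProtocolIDPtrMust(thrift.THeaderProtocolCompact),
    	}

    	// A memory buffer stands in for the raw socket transport purely for illustration;
    	// real clients would use a TSocket/TSSLSocket here.
    	trans := thrift.NewTMemoryBuffer()
    	proto := thrift.NewTHeaderProtocolConf(trans, conf)
    	_ = proto
    }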
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
new file mode 100644
index 000000000..f5736df42
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
@@ -0,0 +1,810 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bufio"
+ "bytes"
+ "compress/zlib"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+// Size in bytes for 32-bit ints.
+const size32 = 4
+
+type headerMeta struct {
+ MagicFlags uint32
+ SequenceID int32
+ HeaderLength uint16
+}
+
+const headerMetaSize = 10
+
+type clientType int
+
+const (
+ clientUnknown clientType = iota
+ clientHeaders
+ clientFramedBinary
+ clientUnframedBinary
+ clientFramedCompact
+ clientUnframedCompact
+)
+
+// Constants defined in THeader format:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+const (
+ THeaderHeaderMagic uint32 = 0x0fff0000
+ THeaderHeaderMask uint32 = 0xffff0000
+ THeaderFlagsMask uint32 = 0x0000ffff
+ THeaderMaxFrameSize uint32 = 0x3fffffff
+)
+
+// THeaderMap is the type of the header map in THeader transport.
+type THeaderMap map[string]string
+
+// THeaderProtocolID is the wrapped protocol id used in THeader.
+type THeaderProtocolID int32
+
+// Supported THeaderProtocolID values.
+const (
+ THeaderProtocolBinary THeaderProtocolID = 0x00
+ THeaderProtocolCompact THeaderProtocolID = 0x02
+ THeaderProtocolDefault = THeaderProtocolBinary
+)
+
+// Declared globally to avoid repetitive allocations, not really used.
+var globalMemoryBuffer = NewTMemoryBuffer()
+
+// Validate checks whether the THeaderProtocolID is a valid/supported one.
+func (id THeaderProtocolID) Validate() error {
+ _, err := id.GetProtocol(globalMemoryBuffer)
+ return err
+}
+
+// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
+func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
+ switch id {
+ default:
+ return nil, NewTApplicationException(
+ INVALID_PROTOCOL,
+ fmt.Sprintf("THeader protocol id %d not supported", id),
+ )
+ case THeaderProtocolBinary:
+ return NewTBinaryProtocolTransport(trans), nil
+ case THeaderProtocolCompact:
+ return NewTCompactProtocol(trans), nil
+ }
+}
+
+// THeaderTransformID defines the numeric id of the transform used.
+type THeaderTransformID int32
+
+// THeaderTransformID values.
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+ TransformNone THeaderTransformID = iota // 0, no special handling
+ TransformZlib // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+ TransformNone: true,
+ TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+ io.Reader
+
+ closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with expected
+// closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+// &TransformReader{Reader: baseReader}
+//
+// directly would be sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+ return &TransformReader{
+ Reader: baseReader,
+ closers: make([]io.Closer, 0, capacity),
+ }
+}
+
+// Close calls the underlying closers in appropriate order,
+// stops at and returns the first error encountered.
+func (tr *TransformReader) Close() error {
+ // Call closers in reversed order
+ for i := len(tr.closers) - 1; i >= 0; i-- {
+ if err := tr.closers[i].Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddTransform adds a transform.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+ switch id {
+ default:
+ return NewTApplicationException(
+ INVALID_TRANSFORM,
+ fmt.Sprintf("THeaderTransformID %d not supported", id),
+ )
+ case TransformNone:
+ // no-op
+ case TransformZlib:
+ readCloser, err := zlib.NewReader(tr.Reader)
+ if err != nil {
+ return err
+ }
+ tr.Reader = readCloser
+ tr.closers = append(tr.closers, readCloser)
+ }
+ return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+ io.Writer
+
+ closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+ writer := &TransformWriter{
+ Writer: baseWriter,
+ closers: make([]io.Closer, 0, len(transforms)),
+ }
+ for _, id := range transforms {
+ if err := writer.AddTransform(id); err != nil {
+ return nil, err
+ }
+ }
+ return writer, nil
+}
+
+// Close calls the underlying closers in appropriate order,
+// stops at and returns the first error encountered.
+func (tw *TransformWriter) Close() error {
+ // Call closers in reversed order
+ for i := len(tw.closers) - 1; i >= 0; i-- {
+ if err := tw.closers[i].Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddTransform adds a transform.
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
+ switch id {
+ default:
+ return NewTApplicationException(
+ INVALID_TRANSFORM,
+ fmt.Sprintf("THeaderTransformID %d not supported", id),
+ )
+ case TransformNone:
+ // no-op
+ case TransformZlib:
+ writeCloser := zlib.NewWriter(tw.Writer)
+ tw.Writer = writeCloser
+ tw.closers = append(tw.closers, writeCloser)
+ }
+ return nil
+}
+
+// THeaderInfoType is the type id of the info headers.
+type THeaderInfoType int32
+
+// Supported THeaderInfoType values.
+const (
+ _ THeaderInfoType = iota // Skip 0
+ InfoKeyValue // 1
+ // Rest of the info types are not supported.
+)
+
+// THeaderTransport is a Transport mode that implements THeader.
+//
+// Note that THeaderTransport handles frame and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// instead of rich transports like TZlibTransport or TFramedTransport.
+type THeaderTransport struct {
+ SequenceID int32
+ Flags uint32
+
+ transport TTransport
+
+ // THeaderMap for read and write
+ readHeaders THeaderMap
+ writeHeaders THeaderMap
+
+ // Reading related variables.
+ reader *bufio.Reader
+ // When frame is detected, we read the frame fully into frameBuffer.
+ frameBuffer bytes.Buffer
+ // When it's non-nil, Read should read from frameReader instead of
+ // reader, and EOF error indicates end of frame instead of end of all
+ // transport.
+ frameReader io.ReadCloser
+
+ // Writing related variables
+ writeBuffer bytes.Buffer
+ writeTransforms []THeaderTransformID
+
+ clientType clientType
+ protocolID THeaderProtocolID
+ cfg *TConfiguration
+
+ // buffer is used in the following scenarios to avoid repetitive
+ // allocations, while 4 is big enough for all those scenarios:
+ //
+ // * header padding (max size 4)
+ // * write the frame size (size 4)
+ buffer [4]byte
+}
+
+var _ TTransport = (*THeaderTransport)(nil)
+
+// Deprecated: Use NewTHeaderTransportConf instead.
+func NewTHeaderTransport(trans TTransport) *THeaderTransport {
+ return NewTHeaderTransportConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderTransportConf creates THeaderTransport from the
+// underlying transport, with given TConfiguration attached.
+//
+// If trans is already a *THeaderTransport, it will be returned as is,
+// but with TConfiguration overridden by the value passed in.
+//
+// The protocol ID in TConfiguration is only useful for client transports.
+// For servers,
+// the protocol ID will be overridden again to the one set by the client,
+// to ensure that servers always speak the same dialect as the client.
+func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
+ if ht, ok := trans.(*THeaderTransport); ok {
+ ht.SetTConfiguration(conf)
+ return ht
+ }
+ PropagateTConfiguration(trans, conf)
+ return &THeaderTransport{
+ transport: trans,
+ reader: bufio.NewReader(trans),
+ writeHeaders: make(THeaderMap),
+ protocolID: conf.GetTHeaderProtocolID(),
+ cfg: conf,
+ }
+}
+
+// Open calls the underlying transport's Open function.
+func (t *THeaderTransport) Open() error {
+ return t.transport.Open()
+}
+
+// IsOpen calls the underlying transport's IsOpen function.
+func (t *THeaderTransport) IsOpen() bool {
+ return t.transport.IsOpen()
+}
+
+// ReadFrame tries to read the frame header, guess the client type, and handle
+// unframed clients.
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
+ if !t.needReadFrame() {
+ // No need to read frame, skipping.
+ return nil
+ }
+
+ // Peek and handle the first 32 bits.
+ // They could either be the length field of a framed message,
+ // or the first bytes of an unframed message.
+ var buf []byte
+ var err error
+ // This is also usually the first read from a connection,
+ // so handle retries around socket timeouts.
+ _, deadlineSet := ctx.Deadline()
+ for {
+ buf, err = t.reader.Peek(size32)
+ if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+ // This is I/O timeout and we still have time,
+ // continue trying
+ continue
+ }
+ // For anything else, do not retry
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ frameSize := binary.BigEndian.Uint32(buf)
+ if frameSize&VERSION_MASK == VERSION_1 {
+ t.clientType = clientUnframedBinary
+ return nil
+ }
+ if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+ t.clientType = clientUnframedCompact
+ return nil
+ }
+
+ // At this point it should be a framed message;
+ // sanity check frameSize, then discard the peeked part.
+ if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ errors.New("frame too large"),
+ )
+ }
+ t.reader.Discard(size32)
+
+ // Read the frame fully into frameBuffer.
+ _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
+ if err != nil {
+ return err
+ }
+ t.frameReader = ioutil.NopCloser(&t.frameBuffer)
+
+ // Peek and handle the next 32 bits.
+ buf = t.frameBuffer.Bytes()[:size32]
+ version := binary.BigEndian.Uint32(buf)
+ if version&THeaderHeaderMask == THeaderHeaderMagic {
+ t.clientType = clientHeaders
+ return t.parseHeaders(ctx, frameSize)
+ }
+ if version&VERSION_MASK == VERSION_1 {
+ t.clientType = clientFramedBinary
+ return nil
+ }
+ if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+ t.clientType = clientFramedCompact
+ return nil
+ }
+ if err := t.endOfFrame(); err != nil {
+ return err
+ }
+ return NewTProtocolExceptionWithType(
+ NOT_IMPLEMENTED,
+ errors.New("unsupported client transport type"),
+ )
+}
+
+// endOfFrame does end of frame handling.
+//
+// It closes frameReader, and also resets frame related states.
+func (t *THeaderTransport) endOfFrame() error {
+ defer func() {
+ t.frameBuffer.Reset()
+ t.frameReader = nil
+ }()
+ return t.frameReader.Close()
+}
+
+func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
+ if t.clientType != clientHeaders {
+ return nil
+ }
+
+ var err error
+ var meta headerMeta
+ if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
+ return err
+ }
+ frameSize -= headerMetaSize
+ t.Flags = meta.MagicFlags & THeaderFlagsMask
+ t.SequenceID = meta.SequenceID
+ headerLength := int64(meta.HeaderLength) * 4
+ if int64(frameSize) < headerLength {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ errors.New("header size is larger than the whole frame"),
+ )
+ }
+ headerBuf := NewTMemoryBuffer()
+ _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
+ if err != nil {
+ return err
+ }
+ hp := NewTCompactProtocol(headerBuf)
+ hp.SetTConfiguration(t.cfg)
+
+ // At this point the header is already read into headerBuf,
+ // and t.frameBuffer starts from the actual payload.
+ protoID, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ t.protocolID = THeaderProtocolID(protoID)
+
+ var transformCount int32
+ transformCount, err = hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ if transformCount > 0 {
+ reader := NewTransformReaderWithCapacity(
+ &t.frameBuffer,
+ int(transformCount),
+ )
+ t.frameReader = reader
+ transformIDs := make([]THeaderTransformID, transformCount)
+ for i := 0; i < int(transformCount); i++ {
+ id, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ transformIDs[i] = THeaderTransformID(id)
+ }
+ // The transform IDs on the wire were added in the order of
+ // writing, so on the reading side we need to reverse the order.
+ for i := transformCount - 1; i >= 0; i-- {
+ id := transformIDs[i]
+ if err := reader.AddTransform(id); err != nil {
+ return err
+ }
+ }
+ }
+
+ // The info part does not use the transforms yet, so it's
+ // important to continue using headerBuf.
+ headers := make(THeaderMap)
+ for {
+ infoType, err := hp.readVarint32()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if THeaderInfoType(infoType) == InfoKeyValue {
+ count, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < int(count); i++ {
+ key, err := hp.ReadString(ctx)
+ if err != nil {
+ return err
+ }
+ value, err := hp.ReadString(ctx)
+ if err != nil {
+ return err
+ }
+ headers[key] = value
+ }
+ } else {
+ // Skip reading info section on the first
+ // unsupported info type.
+ break
+ }
+ }
+ t.readHeaders = headers
+
+ return nil
+}
+
+func (t *THeaderTransport) needReadFrame() bool {
+ if t.clientType == clientUnknown {
+ // This is a new connection that's never read before.
+ return true
+ }
+ if t.isFramed() && t.frameReader == nil {
+ // We just finished the last frame.
+ return true
+ }
+ return false
+}
+
+func (t *THeaderTransport) Read(p []byte) (read int, err error) {
+ // Using context.Background here instead of a passed-in context is safe.
+ // First, there's no way to pass a context into this function.
+ // Second, in the vast majority of calls to Read the frame has already
+ // been read into frameReader, so ReadFrame here mostly guards against
+ // callers that forgot to call ReadFrame before calling Read.
+ err = t.ReadFrame(context.Background())
+ if err != nil {
+ return
+ }
+ if t.frameReader != nil {
+ read, err = t.frameReader.Read(p)
+ if err == nil && t.frameBuffer.Len() <= 0 {
+ // the last Read finished the frame, do endOfFrame
+ // handling here.
+ err = t.endOfFrame()
+ } else if err == io.EOF {
+ err = t.endOfFrame()
+ if err != nil {
+ return
+ }
+ if read == 0 {
+ // Try to read the next frame when we hit EOF
+ // (end of frame) immediately.
+ // When we get here, it means the last read
+ // finished the previous frame, but didn't
+ // do endOfFrame handling yet.
+ // We have to read the next frame here,
+ // as otherwise we would return 0 and nil,
+ // which is a case not handled well by most
+ // protocol implementations.
+ return t.Read(p)
+ }
+ }
+ return
+ }
+ return t.reader.Read(p)
+}
+
+// Write writes data to the write buffer.
+//
+// You need to call Flush to actually write them to the transport.
+func (t *THeaderTransport) Write(p []byte) (int, error) {
+ return t.writeBuffer.Write(p)
+}
+
+// Flush writes the appropriate header and the write buffer to the underlying transport.
+func (t *THeaderTransport) Flush(ctx context.Context) error {
+ if t.writeBuffer.Len() == 0 {
+ return nil
+ }
+
+ defer t.writeBuffer.Reset()
+
+ switch t.clientType {
+ default:
+ fallthrough
+ case clientUnknown:
+ t.clientType = clientHeaders
+ fallthrough
+ case clientHeaders:
+ headers := NewTMemoryBuffer()
+ hp := NewTCompactProtocol(headers)
+ hp.SetTConfiguration(t.cfg)
+ if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ for _, transform := range t.writeTransforms {
+ if _, err := hp.writeVarint32(int32(transform)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+ if len(t.writeHeaders) > 0 {
+ if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ for key, value := range t.writeHeaders {
+ if err := hp.WriteString(ctx, key); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if err := hp.WriteString(ctx, value); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+ }
+ padding := 4 - headers.Len()%4
+ if padding < 4 {
+ buf := t.buffer[:padding]
+ for i := range buf {
+ buf[i] = 0
+ }
+ if _, err := headers.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+
+ var payload bytes.Buffer
+ meta := headerMeta{
+ MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
+ SequenceID: t.SequenceID,
+ HeaderLength: uint16(headers.Len() / 4),
+ }
+ if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := io.Copy(&payload, headers); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ writer, err := NewTransformWriter(&payload, t.writeTransforms)
+ if err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if err := writer.Close(); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ // First write frame length
+ buf := t.buffer[:size32]
+ binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
+ if _, err := t.transport.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ // Then write the payload
+ if _, err := io.Copy(t.transport, &payload); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ case clientFramedBinary, clientFramedCompact:
+ buf := t.buffer[:size32]
+ binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
+ if _, err := t.transport.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ fallthrough
+ case clientUnframedBinary, clientUnframedCompact:
+ if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ return NewTTransportExceptionFromError(ctx.Err())
+ }
+
+ return t.transport.Flush(ctx)
+}
+
+// Close closes the transport, along with its underlying transport.
+func (t *THeaderTransport) Close() error {
+ if err := t.Flush(context.Background()); err != nil {
+ return err
+ }
+ return t.transport.Close()
+}
+
+// RemainingBytes calls underlying transport's RemainingBytes.
+//
+// Even in framed cases, because of all the possible compression transforms
+// involved, the remaining frame size is likely to be different from the actual
+// remaining readable bytes, so we don't bother to keep tracking the remaining
+// frame size by ourselves and just use the underlying transport's
+// RemainingBytes directly.
+func (t *THeaderTransport) RemainingBytes() uint64 {
+ return t.transport.RemainingBytes()
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (t *THeaderTransport) GetReadHeaders() THeaderMap {
+ return t.readHeaders
+}
+
+// SetWriteHeader sets a header for write.
+func (t *THeaderTransport) SetWriteHeader(key, value string) {
+ t.writeHeaders[key] = value
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (t *THeaderTransport) ClearWriteHeaders() {
+ t.writeHeaders = make(THeaderMap)
+}
+
+// AddTransform adds a transform for writing.
+func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
+ if !supportedTransformIDs[transform] {
+ return NewTProtocolExceptionWithType(
+ NOT_IMPLEMENTED,
+ fmt.Errorf("THeaderTransformID %d not supported", transform),
+ )
+ }
+ t.writeTransforms = append(t.writeTransforms, transform)
+ return nil
+}
+
+// Protocol returns the wrapped protocol id used in this THeaderTransport.
+func (t *THeaderTransport) Protocol() THeaderProtocolID {
+ switch t.clientType {
+ default:
+ return t.protocolID
+ case clientFramedBinary, clientUnframedBinary:
+ return THeaderProtocolBinary
+ case clientFramedCompact, clientUnframedCompact:
+ return THeaderProtocolCompact
+ }
+}
+
+func (t *THeaderTransport) isFramed() bool {
+ switch t.clientType {
+ default:
+ return false
+ case clientHeaders, clientFramedBinary, clientFramedCompact:
+ return true
+ }
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(t.transport, cfg)
+ t.cfg = cfg
+}
+
+// THeaderTransportFactory is a TTransportFactory implementation to create
+// THeaderTransport.
+//
+// It also implements TConfigurationSetter.
+type THeaderTransportFactory struct {
+ // The underlying factory; may be nil.
+ Factory TTransportFactory
+
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderTransportFactoryConf instead.
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
+ return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
+// the given *TConfiguration.
+func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+ return &THeaderTransportFactory{
+ Factory: factory,
+
+ cfg: conf,
+ }
+}
+
+// GetTransport implements TTransportFactory.
+func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+ if f.Factory != nil {
+ t, err := f.Factory.GetTransport(trans)
+ if err != nil {
+ return nil, err
+ }
+ return NewTHeaderTransportConf(t, f.cfg), nil
+ }
+ return NewTHeaderTransportConf(trans, f.cfg), nil
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.Factory, f.cfg)
+ f.cfg = cfg
+}
+
+var (
+ _ TConfigurationSetter = (*THeaderTransportFactory)(nil)
+ _ TConfigurationSetter = (*THeaderTransport)(nil)
+)
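
For orientation (not part of this patch), here is a minimal sketch of how the THeaderTransport API added above could be used on the write side. It wraps a TMemoryBuffer purely for illustration — real code would wrap a raw socket transport as the type comment notes — and the import path simply mirrors the vendored location shown in this diff.

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy of the Apache Thrift Go library
)

func main() {
	// Underlying transport; a TMemoryBuffer stands in for a socket here.
	underlying := thrift.NewTMemoryBuffer()

	// Wrap it in a THeaderTransport with a default TConfiguration.
	ht := thrift.NewTHeaderTransportConf(underlying, &thrift.TConfiguration{})

	// Headers set before Flush are encoded into the THeader frame.
	ht.SetWriteHeader("client-id", "example")

	if _, err := ht.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := ht.Flush(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("frame bytes written:", underlying.Len())
}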
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
new file mode 100644
index 000000000..50d44ec8e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "log"
+ "os"
+)
+
+// Logger is a simple wrapper of a logging function.
+//
+// In reality, users might use different logging libraries, and those libraries
+// are not always compatible with each other.
+//
+// Logger is meant to be a simple common ground into which it's easy to wrap
+// whatever logging library is used.
+//
+// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
+// discussion behind it.
+type Logger func(msg string)
+
+// NopLogger is a Logger implementation that does nothing.
+func NopLogger(msg string) {}
+
+// StdLogger wraps stdlib log package into a Logger.
+//
+// If the logger passed in is nil, it falls back to stderr with default flags.
+func StdLogger(logger *log.Logger) Logger {
+ if logger == nil {
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+ }
+ return func(msg string) {
+ logger.Print(msg)
+ }
+}
+
+func fallbackLogger(logger Logger) Logger {
+ if logger == nil {
+ return StdLogger(nil)
+ }
+ return logger
+}
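
A small usage sketch (not part of this patch) of the Logger type added above; the import path mirrors the vendored location and is for illustration only.

package main

import (
	"log"
	"os"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy; path for illustration
)

func main() {
	// Wrap a stdlib *log.Logger; passing nil instead would fall back to
	// stderr with default flags, as documented above.
	var logger thrift.Logger = thrift.StdLogger(log.New(os.Stdout, "thrift: ", log.LstdFlags))
	logger("transport closed, reconnecting")

	// Any func(string) satisfies Logger, e.g. the no-op logger for tests.
	var quiet thrift.Logger = thrift.NopLogger
	quiet("this message is dropped")
}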
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
index b62fd56f0..5936d2730 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -21,6 +21,7 @@ package thrift
import (
"bytes"
+ "context"
)
// Memory buffer-based implementation of the TTransport interface.
@@ -33,14 +34,14 @@ type TMemoryBufferTransportFactory struct {
size int
}
-func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if trans != nil {
t, ok := trans.(*TMemoryBuffer)
if ok && t.size > 0 {
- return NewTMemoryBufferLen(t.size)
+ return NewTMemoryBufferLen(t.size), nil
}
}
- return NewTMemoryBufferLen(p.size)
+ return NewTMemoryBufferLen(p.size), nil
}
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
@@ -70,7 +71,7 @@ func (p *TMemoryBuffer) Close() error {
}
// Flushing a memory buffer is a no-op
-func (p *TMemoryBuffer) Flush() error {
+func (p *TMemoryBuffer) Flush(ctx context.Context) error {
return nil
}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
index aa8daa9b5..e4512d204 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -69,14 +69,14 @@ func NewNumericFromDouble(dValue float64) Numeric {
func NewNumericFromI64(iValue int64) Numeric {
dValue := float64(iValue)
- sValue := string(iValue)
+ sValue := strconv.FormatInt(iValue, 10)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI32(iValue int32) Numeric {
dValue := float64(iValue)
- sValue := string(iValue)
+ sValue := strconv.FormatInt(int64(iValue), 10)
isNil := false
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
new file mode 100644
index 000000000..245a3ccfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+ Process(ctx context.Context, in, out TProtocol) (bool, TException)
+
+ // ProcessorMap returns a map of thrift method names to TProcessorFunctions.
+ ProcessorMap() map[string]TProcessorFunction
+
+ // AddToProcessorMap adds the given TProcessorFunction to the internal
+ // processor map at the given key.
+ //
+ // If one is already set at the given key, it will be replaced with the new
+ // TProcessorFunction.
+ AddToProcessorMap(string, TProcessorFunction)
+}
+
+type TProcessorFunction interface {
+ Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+ GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+ processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+ return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+ return p.processor
+}
+
+// The default processor function factory just returns a singleton
+// instance.
+type TProcessorFunctionFactory interface {
+ GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+ processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+ return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+ return p.processor
+}
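
A sketch (not part of this patch) of plugging a processor into the default TProcessorFactory defined above. The echoProcessor stub below is hypothetical; in practice processors are generated by the Thrift compiler.

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy; path for illustration
)

// echoProcessor is a do-nothing TProcessor used only to demonstrate the factory.
type echoProcessor struct {
	fns map[string]thrift.TProcessorFunction
}

func (p *echoProcessor) Process(ctx context.Context, in, out thrift.TProtocol) (bool, thrift.TException) {
	return false, nil
}

func (p *echoProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
	return p.fns
}

func (p *echoProcessor) AddToProcessorMap(name string, fn thrift.TProcessorFunction) {
	p.fns[name] = fn
}

func main() {
	factory := thrift.NewTProcessorFactory(&echoProcessor{
		fns: make(map[string]thrift.TProcessorFunction),
	})
	// The default factory hands back the same processor for every transport.
	fmt.Printf("%T\n", factory.GetProcessor(nil))
}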
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
index 45fa202e7..0a69bd416 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -20,7 +20,9 @@
package thrift
import (
+ "context"
"errors"
+ "fmt"
)
const (
@@ -29,51 +31,51 @@ const (
)
type TProtocol interface {
- WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
- WriteMessageEnd() error
- WriteStructBegin(name string) error
- WriteStructEnd() error
- WriteFieldBegin(name string, typeId TType, id int16) error
- WriteFieldEnd() error
- WriteFieldStop() error
- WriteMapBegin(keyType TType, valueType TType, size int) error
- WriteMapEnd() error
- WriteListBegin(elemType TType, size int) error
- WriteListEnd() error
- WriteSetBegin(elemType TType, size int) error
- WriteSetEnd() error
- WriteBool(value bool) error
- WriteByte(value int8) error
- WriteI16(value int16) error
- WriteI32(value int32) error
- WriteI64(value int64) error
- WriteDouble(value float64) error
- WriteString(value string) error
- WriteBinary(value []byte) error
+ WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
+ WriteMessageEnd(ctx context.Context) error
+ WriteStructBegin(ctx context.Context, name string) error
+ WriteStructEnd(ctx context.Context) error
+ WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
+ WriteFieldEnd(ctx context.Context) error
+ WriteFieldStop(ctx context.Context) error
+ WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
+ WriteMapEnd(ctx context.Context) error
+ WriteListBegin(ctx context.Context, elemType TType, size int) error
+ WriteListEnd(ctx context.Context) error
+ WriteSetBegin(ctx context.Context, elemType TType, size int) error
+ WriteSetEnd(ctx context.Context) error
+ WriteBool(ctx context.Context, value bool) error
+ WriteByte(ctx context.Context, value int8) error
+ WriteI16(ctx context.Context, value int16) error
+ WriteI32(ctx context.Context, value int32) error
+ WriteI64(ctx context.Context, value int64) error
+ WriteDouble(ctx context.Context, value float64) error
+ WriteString(ctx context.Context, value string) error
+ WriteBinary(ctx context.Context, value []byte) error
- ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
- ReadMessageEnd() error
- ReadStructBegin() (name string, err error)
- ReadStructEnd() error
- ReadFieldBegin() (name string, typeId TType, id int16, err error)
- ReadFieldEnd() error
- ReadMapBegin() (keyType TType, valueType TType, size int, err error)
- ReadMapEnd() error
- ReadListBegin() (elemType TType, size int, err error)
- ReadListEnd() error
- ReadSetBegin() (elemType TType, size int, err error)
- ReadSetEnd() error
- ReadBool() (value bool, err error)
- ReadByte() (value int8, err error)
- ReadI16() (value int16, err error)
- ReadI32() (value int32, err error)
- ReadI64() (value int64, err error)
- ReadDouble() (value float64, err error)
- ReadString() (value string, err error)
- ReadBinary() (value []byte, err error)
+ ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
+ ReadMessageEnd(ctx context.Context) error
+ ReadStructBegin(ctx context.Context) (name string, err error)
+ ReadStructEnd(ctx context.Context) error
+ ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
+ ReadFieldEnd(ctx context.Context) error
+ ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
+ ReadMapEnd(ctx context.Context) error
+ ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
+ ReadListEnd(ctx context.Context) error
+ ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
+ ReadSetEnd(ctx context.Context) error
+ ReadBool(ctx context.Context) (value bool, err error)
+ ReadByte(ctx context.Context) (value int8, err error)
+ ReadI16(ctx context.Context) (value int16, err error)
+ ReadI32(ctx context.Context) (value int32, err error)
+ ReadI64(ctx context.Context) (value int64, err error)
+ ReadDouble(ctx context.Context) (value float64, err error)
+ ReadString(ctx context.Context) (value string, err error)
+ ReadBinary(ctx context.Context) (value []byte, err error)
- Skip(fieldType TType) (err error)
- Flush() (err error)
+ Skip(ctx context.Context, fieldType TType) (err error)
+ Flush(ctx context.Context) (err error)
Transport() TTransport
}
@@ -82,94 +84,94 @@ type TProtocol interface {
const DEFAULT_RECURSION_DEPTH = 64
// Skips over the next data element from the provided input TProtocol object.
-func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
- return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
+ return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
}
// Skips over the next data element from the provided input TProtocol object.
-func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
-
- if maxDepth <= 0 {
- return NewTProtocolExceptionWithType( DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+ if maxDepth <= 0 {
+ return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
}
switch fieldType {
- case STOP:
- return
case BOOL:
- _, err = self.ReadBool()
+ _, err = self.ReadBool(ctx)
return
case BYTE:
- _, err = self.ReadByte()
+ _, err = self.ReadByte(ctx)
return
case I16:
- _, err = self.ReadI16()
+ _, err = self.ReadI16(ctx)
return
case I32:
- _, err = self.ReadI32()
+ _, err = self.ReadI32(ctx)
return
case I64:
- _, err = self.ReadI64()
+ _, err = self.ReadI64(ctx)
return
case DOUBLE:
- _, err = self.ReadDouble()
+ _, err = self.ReadDouble(ctx)
return
case STRING:
- _, err = self.ReadString()
+ _, err = self.ReadString(ctx)
return
case STRUCT:
- if _, err = self.ReadStructBegin(); err != nil {
+ if _, err = self.ReadStructBegin(ctx); err != nil {
return err
}
for {
- _, typeId, _, _ := self.ReadFieldBegin()
+ _, typeId, _, _ := self.ReadFieldBegin(ctx)
if typeId == STOP {
break
}
- err := Skip(self, typeId, maxDepth-1)
+ err := Skip(ctx, self, typeId, maxDepth-1)
if err != nil {
return err
}
- self.ReadFieldEnd()
+ self.ReadFieldEnd(ctx)
}
- return self.ReadStructEnd()
+ return self.ReadStructEnd(ctx)
case MAP:
- keyType, valueType, size, err := self.ReadMapBegin()
+ keyType, valueType, size, err := self.ReadMapBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
- err := Skip(self, keyType, maxDepth-1)
+ err := Skip(ctx, self, keyType, maxDepth-1)
if err != nil {
return err
}
- self.Skip(valueType)
+ self.Skip(ctx, valueType)
}
- return self.ReadMapEnd()
+ return self.ReadMapEnd(ctx)
case SET:
- elemType, size, err := self.ReadSetBegin()
+ elemType, size, err := self.ReadSetBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
- err := Skip(self, elemType, maxDepth-1)
+ err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
- return self.ReadSetEnd()
+ return self.ReadSetEnd(ctx)
case LIST:
- elemType, size, err := self.ReadListBegin()
+ elemType, size, err := self.ReadListBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
- err := Skip(self, elemType, maxDepth-1)
+ err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
- return self.ReadListEnd()
+ return self.ReadListEnd(ctx)
+ default:
+ return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
}
return nil
}
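
A sketch (not part of this patch) of the context-aware Skip helpers in use: a value is written through a protocol backed by a TMemoryBuffer and then skipped on read with SkipDefaultDepth.

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy; path for illustration
)

func main() {
	ctx := context.Background()
	buf := thrift.NewTMemoryBuffer()
	p := thrift.NewTSimpleJSONProtocol(buf)

	// Write one I64 and flush it into the buffer.
	if err := p.WriteI64(ctx, 1234); err != nil {
		panic(err)
	}
	if err := p.Flush(ctx); err != nil {
		panic(err)
	}

	// Skip over it on the read side without decoding it into a variable.
	if err := thrift.SkipDefaultDepth(ctx, p, thrift.I64); err != nil {
		panic(err)
	}
	fmt.Println("value skipped")
}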
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
index 6e357ee89..9dcf4bfd9 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -21,6 +21,7 @@ package thrift
import (
"encoding/base64"
+ "errors"
)
// Thrift Protocol exception
@@ -40,8 +41,15 @@ const (
)
type tProtocolException struct {
- typeId int
- message string
+ typeId int
+ err error
+ msg string
+}
+
+var _ TProtocolException = (*tProtocolException)(nil)
+
+func (tProtocolException) TExceptionType() TExceptionType {
+ return TExceptionTypeProtocol
}
func (p *tProtocolException) TypeId() int {
@@ -49,30 +57,48 @@ func (p *tProtocolException) TypeId() int {
}
func (p *tProtocolException) String() string {
- return p.message
+ return p.msg
}
func (p *tProtocolException) Error() string {
- return p.message
+ return p.msg
+}
+
+func (p *tProtocolException) Unwrap() error {
+ return p.err
}
func NewTProtocolException(err error) TProtocolException {
if err == nil {
return nil
}
- if e,ok := err.(TProtocolException); ok {
+
+ if e, ok := err.(TProtocolException); ok {
return e
}
- if _, ok := err.(base64.CorruptInputError); ok {
- return &tProtocolException{INVALID_DATA, err.Error()}
+
+ if errors.As(err, new(base64.CorruptInputError)) {
+ return NewTProtocolExceptionWithType(INVALID_DATA, err)
}
- return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+
+ return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
}
func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
if err == nil {
return nil
}
- return &tProtocolException{errType, err.Error()}
+ return &tProtocolException{
+ typeId: errType,
+ err: err,
+ msg: err.Error(),
+ }
}
+func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
+ return &tProtocolException{
+ typeId: err.TypeId(),
+ err: err,
+ msg: prepend + err.Error(),
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
new file mode 100644
index 000000000..d884c6ac6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type responseHelperKey struct{}
+
+// TResponseHelper defines an object with a set of helper functions that can be
+// retrieved from the context object passed into server handler functions.
+//
+// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
+// from the context object.
+//
+// The zero value of TResponseHelper is valid with all helper functions being
+// no-op.
+type TResponseHelper struct {
+ // THeader related functions
+ *THeaderResponseHelper
+}
+
+// THeaderResponseHelper defines THeader related TResponseHelper functions.
+//
+// The zero value of *THeaderResponseHelper is valid with all helper functions
+// being no-op.
+type THeaderResponseHelper struct {
+ proto *THeaderProtocol
+}
+
+// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
+// underlying TProtocol.
+func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
+ if hp, ok := proto.(*THeaderProtocol); ok {
+ return &THeaderResponseHelper{
+ proto: hp,
+ }
+ }
+ return nil
+}
+
+// SetHeader sets a response header.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) SetHeader(key, value string) {
+ if h != nil && h.proto != nil {
+ h.proto.SetWriteHeader(key, value)
+ }
+}
+
+// ClearHeaders clears all the response headers previously set.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) ClearHeaders() {
+ if h != nil && h.proto != nil {
+ h.proto.ClearWriteHeaders()
+ }
+}
+
+// GetResponseHelper retrieves the TResponseHelper implementation injected into
+// the context object.
+//
+// If no helper was found in the context object, a no-op helper with ok == false
+// will be returned.
+func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
+ if v := ctx.Value(responseHelperKey{}); v != nil {
+ helper, ok = v.(TResponseHelper)
+ }
+ return
+}
+
+// SetResponseHelper injects TResponseHelper into the context object.
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
+ return context.WithValue(ctx, responseHelperKey{}, helper)
+}
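
A sketch (not part of this patch) of how a handler could use the injected TResponseHelper; the handle function here is hypothetical, and a zero-value helper is used to show that the calls are safe no-ops when THeader is not in play.

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy; path for illustration
)

func handle(ctx context.Context) {
	helper, ok := thrift.GetResponseHelper(ctx)
	if !ok {
		fmt.Println("no response helper injected; calls below are no-ops")
	}
	// Safe even on the zero value: SetHeader/ClearHeaders only act when a
	// THeaderProtocol is attached.
	helper.SetHeader("request-id", "42")
	helper.ClearHeaders()
}

func main() {
	// Simulate the server side injecting a (zero-value) helper.
	ctx := thrift.SetResponseHelper(context.Background(), thrift.TResponseHelper{})
	handle(ctx)
}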
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
index 8e296a99b..83fdf29f5 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -19,7 +19,10 @@
package thrift
-import "io"
+import (
+ "errors"
+ "io"
+)
type RichTransport struct {
TTransport
@@ -49,7 +52,7 @@ func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
func readByte(r io.Reader) (c byte, err error) {
v := [1]byte{0}
n, err := r.Read(v[0:1])
- if n > 0 && (err == nil || err == io.EOF) {
+ if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
return v[0], nil
}
if n > 0 && err != nil {
@@ -66,4 +69,3 @@ func writeByte(w io.Writer, c byte) error {
_, err := w.Write(v[0:1])
return err
}
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
index 771222999..c44979094 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -19,57 +19,118 @@
package thrift
+import (
+ "context"
+ "sync"
+)
+
type TSerializer struct {
Transport *TMemoryBuffer
Protocol TProtocol
}
type TStruct interface {
- Write(p TProtocol) error
- Read(p TProtocol) error
+ Write(ctx context.Context, p TProtocol) error
+ Read(ctx context.Context, p TProtocol) error
}
func NewTSerializer() *TSerializer {
transport := NewTMemoryBufferLen(1024)
- protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+ protocol := NewTBinaryProtocolTransport(transport)
return &TSerializer{
- transport,
- protocol}
+ Transport: transport,
+ Protocol: protocol,
+ }
}
-func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
t.Transport.Reset()
- if err = msg.Write(t.Protocol); err != nil {
+ if err = msg.Write(ctx, t.Protocol); err != nil {
return
}
- if err = t.Protocol.Flush(); err != nil {
+ if err = t.Protocol.Flush(ctx); err != nil {
return
}
- if err = t.Transport.Flush(); err != nil {
+ if err = t.Transport.Flush(ctx); err != nil {
return
}
return t.Transport.String(), nil
}
-func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
t.Transport.Reset()
- if err = msg.Write(t.Protocol); err != nil {
+ if err = msg.Write(ctx, t.Protocol); err != nil {
return
}
- if err = t.Protocol.Flush(); err != nil {
+ if err = t.Protocol.Flush(ctx); err != nil {
return
}
- if err = t.Transport.Flush(); err != nil {
+ if err = t.Transport.Flush(ctx); err != nil {
return
}
b = append(b, t.Transport.Bytes()...)
return
}
+
+// TSerializerPool is the thread-safe version of TSerializer; it uses a resource
+// pool of TSerializer under the hood.
+//
+// It must be initialized with either NewTSerializerPool or
+// NewTSerializerPoolSizeFactory.
+type TSerializerPool struct {
+ pool sync.Pool
+}
+
+// NewTSerializerPool creates a new TSerializerPool.
+//
+// NewTSerializer can be used as the arg here.
+func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
+ return &TSerializerPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return f()
+ },
+ },
+ }
+}
+
+// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
+// size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
+ return &TSerializerPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ transport := NewTMemoryBufferLen(size)
+ protocol := factory.GetProtocol(transport)
+
+ return &TSerializer{
+ Transport: transport,
+ Protocol: protocol,
+ }
+ },
+ },
+ }
+}
+
+func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
+ s := t.pool.Get().(*TSerializer)
+ defer t.pool.Put(s)
+ return s.WriteString(ctx, msg)
+}
+
+func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
+ s := t.pool.Get().(*TSerializer)
+ defer t.pool.Put(s)
+ return s.Write(ctx, msg)
+}
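
A sketch (not part of this patch) of sharing serializers across goroutines with the new TSerializerPool, using NewTSerializer as the pool factory. The empty struct below is a hypothetical stand-in for a compiler-generated TStruct.

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/uber/jaeger-client-go/thrift" // vendored copy; path for illustration
)

// empty is a trivial TStruct used only for illustration.
type empty struct{}

func (empty) Write(ctx context.Context, p thrift.TProtocol) error {
	if err := p.WriteStructBegin(ctx, "Empty"); err != nil {
		return err
	}
	if err := p.WriteFieldStop(ctx); err != nil {
		return err
	}
	return p.WriteStructEnd(ctx)
}

func (empty) Read(ctx context.Context, p thrift.TProtocol) error { return nil }

func main() {
	pool := thrift.NewTSerializerPool(thrift.NewTSerializer)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			b, err := pool.Write(context.Background(), empty{})
			if err != nil {
				panic(err)
			}
			fmt.Println(len(b), "bytes serialized")
		}()
	}
	wg.Wait()
}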
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
new file mode 100644
index 000000000..51c40b64a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Server transport. Object which provides client transports.
+type TServerTransport interface {
+ Listen() error
+ Accept() (TTransport, error)
+ Close() error
+
+ // Optional method implementation. This signals to the server transport
+ // that it should break out of any accept() or listen() that it is currently
+ // blocked on. This method, if implemented, MUST be thread safe, as it may
+ // be called from a different thread context than the other TServerTransport
+ // methods.
+ Interrupt() error
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
index 412a482d0..d1a815453 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -22,8 +22,10 @@ package thrift
import (
"bufio"
"bytes"
+ "context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
"math"
@@ -33,12 +35,13 @@ import (
type _ParseContext int
const (
- _CONTEXT_IN_TOPLEVEL _ParseContext = 1
- _CONTEXT_IN_LIST_FIRST _ParseContext = 2
- _CONTEXT_IN_LIST _ParseContext = 3
- _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4
- _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5
- _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+ _CONTEXT_INVALID _ParseContext = iota
+ _CONTEXT_IN_TOPLEVEL // 1
+ _CONTEXT_IN_LIST_FIRST // 2
+ _CONTEXT_IN_LIST // 3
+ _CONTEXT_IN_OBJECT_FIRST // 4
+ _CONTEXT_IN_OBJECT_NEXT_KEY // 5
+ _CONTEXT_IN_OBJECT_NEXT_VALUE // 6
)
func (p _ParseContext) String() string {
@@ -59,7 +62,33 @@ func (p _ParseContext) String() string {
return "UNKNOWN-PARSE-CONTEXT"
}
-// JSON protocol implementation for thrift.
+type jsonContextStack []_ParseContext
+
+func (s *jsonContextStack) push(v _ParseContext) {
+ *s = append(*s, v)
+}
+
+func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
+ l := len(s)
+ if l <= 0 {
+ return
+ }
+ return s[l-1], true
+}
+
+func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
+ l := len(*s)
+ if l <= 0 {
+ return
+ }
+ v = (*s)[l-1]
+ *s = (*s)[0 : l-1]
+ return v, true
+}
+
+var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
+
+// Simple JSON protocol implementation for thrift.
//
// This protocol produces/consumes a simple output format
// suitable for parsing by scripting languages. It should not be
@@ -68,8 +97,8 @@ func (p _ParseContext) String() string {
type TSimpleJSONProtocol struct {
trans TTransport
- parseContextStack []int
- dumpContext []int
+ parseContextStack jsonContextStack
+ dumpContext jsonContextStack
writer *bufio.Writer
reader *bufio.Reader
@@ -81,8 +110,8 @@ func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
writer: bufio.NewWriter(t),
reader: bufio.NewReader(t),
}
- v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
- v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+ v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+ v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
return v
}
@@ -155,114 +184,113 @@ func mismatch(expected, actual string) error {
return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
}
-func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
p.resetContextStack() // THRIFT-3735
if e := p.OutputListBegin(); e != nil {
return e
}
- if e := p.WriteString(name); e != nil {
+ if e := p.WriteString(ctx, name); e != nil {
return e
}
- if e := p.WriteByte(int8(typeId)); e != nil {
+ if e := p.WriteByte(ctx, int8(typeId)); e != nil {
return e
}
- if e := p.WriteI32(seqId); e != nil {
+ if e := p.WriteI32(ctx, seqId); e != nil {
return e
}
return nil
}
-func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
return p.OutputListEnd()
}
-func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
if e := p.OutputObjectBegin(); e != nil {
return e
}
return nil
}
-func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
return p.OutputObjectEnd()
}
-func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
- if e := p.WriteString(name); e != nil {
+func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+ if e := p.WriteString(ctx, name); e != nil {
return e
}
return nil
}
-func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
- //return p.OutputListEnd()
+func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
return nil
}
-func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
- if e := p.WriteByte(int8(keyType)); e != nil {
+ if e := p.WriteByte(ctx, int8(keyType)); e != nil {
return e
}
- if e := p.WriteByte(int8(valueType)); e != nil {
+ if e := p.WriteByte(ctx, int8(valueType)); e != nil {
return e
}
- return p.WriteI32(int32(size))
+ return p.WriteI32(ctx, int32(size))
}
-func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
return p.OutputListEnd()
}
-func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
-func (p *TSimpleJSONProtocol) WriteListEnd() error {
+func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
return p.OutputListEnd()
}
-func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
-func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
return p.OutputListEnd()
}
-func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
return p.OutputBool(b)
}
-func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
- return p.WriteI32(int32(b))
+func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+ return p.WriteI32(ctx, int32(b))
}
-func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
- return p.WriteI32(int32(v))
+func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+ return p.WriteI32(ctx, int32(v))
}
-func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
return p.OutputI64(int64(v))
}
-func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
return p.OutputI64(int64(v))
}
-func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
return p.OutputF64(v)
}
-func (p *TSimpleJSONProtocol) WriteString(v string) error {
+func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
return p.OutputString(v)
}
-func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
// JSON library only takes in a string,
// not an arbitrary byte array, to ensure bytes are transmitted
// efficiently we must convert this into a valid JSON string
@@ -288,39 +316,39 @@ func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
}
// Reading methods.
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
p.resetContextStack() // THRIFT-3735
if isNull, err := p.ParseListBegin(); isNull || err != nil {
return name, typeId, seqId, err
}
- if name, err = p.ReadString(); err != nil {
+ if name, err = p.ReadString(ctx); err != nil {
return name, typeId, seqId, err
}
- bTypeId, err := p.ReadByte()
+ bTypeId, err := p.ReadByte(ctx)
typeId = TMessageType(bTypeId)
if err != nil {
return name, typeId, seqId, err
}
- if seqId, err = p.ReadI32(); err != nil {
+ if seqId, err = p.ReadI32(ctx); err != nil {
return name, typeId, seqId, err
}
return name, typeId, seqId, nil
}
-func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
return p.ParseListEnd()
}
-func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
_, err = p.ParseObjectStart()
return "", err
}
-func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
return p.ParseObjectEnd()
}
-func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
if err := p.ParsePreValue(); err != nil {
return "", STOP, 0, err
}
@@ -339,21 +367,6 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
return name, STOP, 0, err
}
return name, STOP, -1, p.ParsePostValue()
- /*
- if err = p.ParsePostValue(); err != nil {
- return name, STOP, 0, err
- }
- if isNull, err := p.ParseListBegin(); isNull || err != nil {
- return name, STOP, 0, err
- }
- bType, err := p.ReadByte()
- thetype := TType(bType)
- if err != nil {
- return name, thetype, 0, err
- }
- id, err := p.ReadI16()
- return name, thetype, id, err
- */
}
e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -361,57 +374,56 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
return "", STOP, 0, NewTProtocolException(io.EOF)
}
-func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
return nil
- //return p.ParseListEnd()
}
-func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, VOID, 0, e
}
// read keyType
- bKeyType, e := p.ReadByte()
+ bKeyType, e := p.ReadByte(ctx)
keyType = TType(bKeyType)
if e != nil {
return keyType, valueType, size, e
}
// read valueType
- bValueType, e := p.ReadByte()
+ bValueType, e := p.ReadByte(ctx)
valueType = TType(bValueType)
if e != nil {
return keyType, valueType, size, e
}
// read size
- iSize, err := p.ReadI64()
+ iSize, err := p.ReadI64(ctx)
size = int(iSize)
return keyType, valueType, size, err
}
-func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
return p.ParseListEnd()
}
-func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
-func (p *TSimpleJSONProtocol) ReadListEnd() error {
+func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
return p.ParseListEnd()
}
-func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
-func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
return p.ParseListEnd()
}
-func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
var value bool
if err := p.ParsePreValue(); err != nil {
@@ -466,32 +478,32 @@ func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
return value, p.ParsePostValue()
}
-func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
- v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+ v, err := p.ReadI64(ctx)
return int8(v), err
}
-func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
- v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+ v, err := p.ReadI64(ctx)
return int16(v), err
}
-func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
- v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+ v, err := p.ReadI64(ctx)
return int32(v), err
}
-func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
v, _, err := p.ParseI64()
return v, err
}
-func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
v, _, err := p.ParseF64()
return v, err
}
-func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
var v string
if err := p.ParsePreValue(); err != nil {
return v, err
@@ -521,7 +533,7 @@ func (p *TSimpleJSONProtocol) ReadString() (string, error) {
return v, p.ParsePostValue()
}
-func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
var v []byte
if err := p.ParsePreValue(); err != nil {
return nil, err
@@ -552,12 +564,12 @@ func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
return v, p.ParsePostValue()
}
-func (p *TSimpleJSONProtocol) Flush() (err error) {
+func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.writer.Flush())
}
-func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
- return SkipDefaultDepth(p, fieldType)
+func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TSimpleJSONProtocol) Transport() TTransport {
@@ -565,41 +577,41 @@ func (p *TSimpleJSONProtocol) Transport() TTransport {
}
func (p *TSimpleJSONProtocol) OutputPreValue() error {
- cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
switch cxt {
case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
if _, e := p.write(JSON_COMMA); e != nil {
return NewTProtocolException(e)
}
- break
case _CONTEXT_IN_OBJECT_NEXT_VALUE:
if _, e := p.write(JSON_COLON); e != nil {
return NewTProtocolException(e)
}
- break
}
return nil
}
func (p *TSimpleJSONProtocol) OutputPostValue() error {
- cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
switch cxt {
case _CONTEXT_IN_LIST_FIRST:
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
- break
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_LIST)
case _CONTEXT_IN_OBJECT_FIRST:
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
- break
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
case _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
- break
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
- break
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
}
return nil
}
@@ -614,10 +626,13 @@ func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
} else {
v = string(JSON_FALSE)
}
- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ switch cxt {
case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
v = jsonQuote(v)
- default:
}
if e := p.OutputStringData(v); e != nil {
return e
@@ -647,11 +662,14 @@ func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
} else if math.IsInf(value, -1) {
v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
} else {
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
v = strconv.FormatFloat(value, 'g', -1, 64)
- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ switch cxt {
case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
- default:
}
}
if e := p.OutputStringData(v); e != nil {
@@ -664,11 +682,14 @@ func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
if e := p.OutputPreValue(); e != nil {
return e
}
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
v := strconv.FormatInt(value, 10)
- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ switch cxt {
case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
v = jsonQuote(v)
- default:
}
if e := p.OutputStringData(v); e != nil {
return e
@@ -698,7 +719,7 @@ func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
if _, e := p.write(JSON_LBRACE); e != nil {
return NewTProtocolException(e)
}
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
return nil
}
@@ -706,7 +727,10 @@ func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
if _, e := p.write(JSON_RBRACE); e != nil {
return NewTProtocolException(e)
}
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ _, ok := p.dumpContext.pop()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
if e := p.OutputPostValue(); e != nil {
return e
}
@@ -720,7 +744,7 @@ func (p *TSimpleJSONProtocol) OutputListBegin() error {
if _, e := p.write(JSON_LBRACKET); e != nil {
return NewTProtocolException(e)
}
- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+ p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
return nil
}
@@ -728,7 +752,10 @@ func (p *TSimpleJSONProtocol) OutputListEnd() error {
if _, e := p.write(JSON_RBRACKET); e != nil {
return NewTProtocolException(e)
}
- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ _, ok := p.dumpContext.pop()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
if e := p.OutputPostValue(); e != nil {
return e
}
@@ -739,10 +766,10 @@ func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) erro
if e := p.OutputListBegin(); e != nil {
return e
}
- if e := p.WriteByte(int8(elemType)); e != nil {
+ if e := p.OutputI64(int64(elemType)); e != nil {
return e
}
- if e := p.WriteI64(int64(size)); e != nil {
+ if e := p.OutputI64(int64(size)); e != nil {
return e
}
return nil
@@ -752,7 +779,10 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error {
if e := p.readNonSignificantWhitespace(); e != nil {
return NewTProtocolException(e)
}
- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ cxt, ok := p.parseContextStack.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
b, _ := p.reader.Peek(1)
switch cxt {
case _CONTEXT_IN_LIST:
@@ -771,7 +801,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error {
return NewTProtocolExceptionWithType(INVALID_DATA, e)
}
}
- break
case _CONTEXT_IN_OBJECT_NEXT_KEY:
if len(b) > 0 {
switch b[0] {
@@ -788,7 +817,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error {
return NewTProtocolExceptionWithType(INVALID_DATA, e)
}
}
- break
case _CONTEXT_IN_OBJECT_NEXT_VALUE:
if len(b) > 0 {
switch b[0] {
@@ -803,7 +831,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error {
return NewTProtocolExceptionWithType(INVALID_DATA, e)
}
}
- break
}
return nil
}
@@ -812,20 +839,20 @@ func (p *TSimpleJSONProtocol) ParsePostValue() error {
if e := p.readNonSignificantWhitespace(); e != nil {
return NewTProtocolException(e)
}
- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ cxt, ok := p.parseContextStack.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
switch cxt {
case _CONTEXT_IN_LIST_FIRST:
- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
- break
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_LIST)
case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
- break
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
- break
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
}
return nil
}
@@ -978,7 +1005,7 @@ func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
}
if len(b) > 0 && b[0] == JSON_LBRACE[0] {
p.reader.ReadByte()
- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
return false, nil
} else if p.safePeekContains(JSON_NULL) {
return true, nil
@@ -991,7 +1018,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
if isNull, err := p.readIfNull(); isNull || err != nil {
return err
}
- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ cxt, _ := p.parseContextStack.peek()
if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
return NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -1009,7 +1036,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
break
}
}
- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack.pop()
return p.ParsePostValue()
}
@@ -1023,7 +1050,7 @@ func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
return false, err
}
if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+ p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
p.reader.ReadByte()
isNull = false
} else if p.safePeekContains(JSON_NULL) {
@@ -1038,12 +1065,12 @@ func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
- bElemType, err := p.ReadByte()
+ bElemType, _, err := p.ParseI64()
elemType = TType(bElemType)
if err != nil {
return elemType, size, err
}
- nSize, err2 := p.ReadI64()
+ nSize, _, err2 := p.ParseI64()
size = int(nSize)
return elemType, size, err2
}
@@ -1052,7 +1079,7 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error {
if isNull, err := p.readIfNull(); isNull || err != nil {
return err
}
- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ cxt, _ := p.parseContextStack.peek()
if cxt != _CONTEXT_IN_LIST {
e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
return NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -1064,14 +1091,16 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error {
for _, char := range line {
switch char {
default:
- e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+ e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
return NewTProtocolExceptionWithType(INVALID_DATA, e)
case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
break
}
}
- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
- if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+ p.parseContextStack.pop()
+ if cxt, ok := p.parseContextStack.peek(); !ok {
+ return errEmptyJSONContextStack
+ } else if cxt == _CONTEXT_IN_TOPLEVEL {
return nil
}
return p.ParsePostValue()
@@ -1315,7 +1344,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
for i := 0; i < len(b); i++ {
a, _ := p.reader.Peek(i + 1)
- if len(a) == 0 || a[i] != b[i] {
+ if len(a) < (i+1) || a[i] != b[i] {
return false
}
}
@@ -1324,8 +1353,8 @@ func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
// Reset the context stack to its initial state.
func (p *TSimpleJSONProtocol) resetContextStack() {
- p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
- p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+ p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+ p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
}
func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
@@ -1335,3 +1364,10 @@ func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
}
return n, err
}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+}
+
+var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
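The hunks above replace raw p.dumpContext[len(p.dumpContext)-1] indexing with peek/push/pop calls on a small stack type. That type's definition is outside this hunk; the following is only a sketch of the shape those calls imply, with the method set and the _ParseContext element type assumed from the usage above.

package thrift

// jsonContextStack is a sketch of the stack type the refactor above appears
// to call into; the real definition lives elsewhere in the upstream file and
// may differ in detail.
type jsonContextStack []_ParseContext

// push adds a context to the top of the stack.
func (s *jsonContextStack) push(v _ParseContext) {
	*s = append(*s, v)
}

// peek returns the top of the stack without removing it; ok is false when the
// stack is empty, which the callers above turn into errEmptyJSONContextStack.
func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
	if len(s) == 0 {
		return
	}
	return s[len(s)-1], true
}

// pop removes and returns the top of the stack.
func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
	v, ok = s.peek()
	if ok {
		*s = (*s)[:len(*s)-1]
	}
	return
}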
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
new file mode 100644
index 000000000..563cbfc69
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrAbandonRequest is a special error server handler implementations can
+// return to indicate that the request has been abandoned.
+//
+// TSimpleServer will check for this error, and close the client connection
+// instead of writing the response/error back to the client.
+//
+// It shall only be used when the server handler implementation knows that the
+// client has already abandoned the request (by checking that the passed-in
+// context is already canceled, for example).
+var ErrAbandonRequest = errors.New("request abandoned")
+
+// ServerConnectivityCheckInterval defines the ticker interval used by
+// the connectivity check in thrift-compiled TProcessorFunc implementations.
+//
+// It's defined as a variable instead of a constant, so that thrift server
+// implementations can change its value to control the behavior.
+//
+// If it's changed to <=0, the feature will be disabled.
+var ServerConnectivityCheckInterval = time.Millisecond * 5
+
+/*
+ * This is not a typical TSimpleServer as it is not blocked after accepting a socket.
+ * It is more like a TThreadedServer that can handle different connections in different goroutines.
+ * This will work if the Go client implements a connection pool on the client side.
+ */
+type TSimpleServer struct {
+ closed int32
+ wg sync.WaitGroup
+ mu sync.Mutex
+
+ processorFactory TProcessorFactory
+ serverTransport TServerTransport
+ inputTransportFactory TTransportFactory
+ outputTransportFactory TTransportFactory
+ inputProtocolFactory TProtocolFactory
+ outputProtocolFactory TProtocolFactory
+
+ // Headers to auto forward in THeaderProtocol
+ forwardHeaders []string
+
+ logger Logger
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+ return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+ serverTransport,
+ transportFactory,
+ protocolFactory,
+ )
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+ serverTransport,
+ inputTransportFactory,
+ outputTransportFactory,
+ inputProtocolFactory,
+ outputProtocolFactory,
+ )
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+ return NewTSimpleServerFactory6(processorFactory,
+ serverTransport,
+ NewTTransportFactory(),
+ NewTTransportFactory(),
+ NewTBinaryProtocolFactoryDefault(),
+ NewTBinaryProtocolFactoryDefault(),
+ )
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory6(processorFactory,
+ serverTransport,
+ transportFactory,
+ transportFactory,
+ protocolFactory,
+ protocolFactory,
+ )
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+ return &TSimpleServer{
+ processorFactory: processorFactory,
+ serverTransport: serverTransport,
+ inputTransportFactory: inputTransportFactory,
+ outputTransportFactory: outputTransportFactory,
+ inputProtocolFactory: inputProtocolFactory,
+ outputProtocolFactory: outputProtocolFactory,
+ }
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+ return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+ return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+ return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+ return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+ return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+ return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+ return p.serverTransport.Listen()
+}
+
+// SetForwardHeaders sets the list of header keys that will be auto forwarded
+// while using THeaderProtocol.
+//
+// "forward" means that when the server is also a client to other upstream
+// thrift servers, the context object user gets in the processor functions will
+// have both read and write headers set, with write headers being forwarded.
+// Users can always override the write headers by calling SetWriteHeaderList
+// before calling thrift client functions.
+func (p *TSimpleServer) SetForwardHeaders(headers []string) {
+ size := len(headers)
+ if size == 0 {
+ p.forwardHeaders = nil
+ return
+ }
+
+ keys := make([]string, size)
+ copy(keys, headers)
+ p.forwardHeaders = keys
+}
+
+// SetLogger sets the logger used by this TSimpleServer.
+//
+// If no logger was set before Serve is called, a default logger using standard
+// log library will be used.
+func (p *TSimpleServer) SetLogger(logger Logger) {
+ p.logger = logger
+}
+
+func (p *TSimpleServer) innerAccept() (int32, error) {
+ client, err := p.serverTransport.Accept()
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ closed := atomic.LoadInt32(&p.closed)
+ if closed != 0 {
+ return closed, nil
+ }
+ if err != nil {
+ return 0, err
+ }
+ if client != nil {
+ p.wg.Add(1)
+ go func() {
+ defer p.wg.Done()
+ if err := p.processRequests(client); err != nil {
+ p.logger(fmt.Sprintf("error processing request: %v", err))
+ }
+ }()
+ }
+ return 0, nil
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+ for {
+ closed, err := p.innerAccept()
+ if err != nil {
+ return err
+ }
+ if closed != 0 {
+ return nil
+ }
+ }
+}
+
+func (p *TSimpleServer) Serve() error {
+ p.logger = fallbackLogger(p.logger)
+
+ err := p.Listen()
+ if err != nil {
+ return err
+ }
+ p.AcceptLoop()
+ return nil
+}
+
+func (p *TSimpleServer) Stop() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if atomic.LoadInt32(&p.closed) != 0 {
+ return nil
+ }
+ atomic.StoreInt32(&p.closed, 1)
+ p.serverTransport.Interrupt()
+ p.wg.Wait()
+ return nil
+}
+
+// If err is actually EOF, return nil, otherwise return err as-is.
+func treatEOFErrorsAsNil(err error) error {
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ var te TTransportException
+ if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
+ return nil
+ }
+ return err
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) (err error) {
+ defer func() {
+ err = treatEOFErrorsAsNil(err)
+ }()
+
+ processor := p.processorFactory.GetProcessor(client)
+ inputTransport, err := p.inputTransportFactory.GetTransport(client)
+ if err != nil {
+ return err
+ }
+ inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+ var outputTransport TTransport
+ var outputProtocol TProtocol
+
+ // for THeaderProtocol, we must use the same protocol instance for
+ // input and output so that the response is in the same dialect that
+ // the server detected the request was in.
+ headerProtocol, ok := inputProtocol.(*THeaderProtocol)
+ if ok {
+ outputProtocol = inputProtocol
+ } else {
+ oTrans, err := p.outputTransportFactory.GetTransport(client)
+ if err != nil {
+ return err
+ }
+ outputTransport = oTrans
+ outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
+ }
+
+ if inputTransport != nil {
+ defer inputTransport.Close()
+ }
+ if outputTransport != nil {
+ defer outputTransport.Close()
+ }
+ for {
+ if atomic.LoadInt32(&p.closed) != 0 {
+ return nil
+ }
+
+ ctx := SetResponseHelper(
+ defaultCtx,
+ TResponseHelper{
+ THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
+ },
+ )
+ if headerProtocol != nil {
+ // We need to call ReadFrame here, otherwise we won't
+ // get any headers on the AddReadTHeaderToContext call.
+ //
+ // ReadFrame is safe to be called multiple times so it
+ // won't break when it's called again later when we
+ // actually start to read the message.
+ if err := headerProtocol.ReadFrame(ctx); err != nil {
+ return err
+ }
+ ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
+ ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
+ }
+
+ ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
+ if errors.Is(err, ErrAbandonRequest) {
+ return client.Close()
+ }
+ if errors.As(err, new(TTransportException)) && err != nil {
+ return err
+ }
+ var tae TApplicationException
+ if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
+ continue
+ }
+ if !ok {
+ break
+ }
+ }
+ return nil
+}
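For context on the ErrAbandonRequest contract documented in this new file, a handler might use it roughly as follows. This is only a sketch: echoHandler and its Echo method are hypothetical placeholders for a generated-service handler, not part of this diff.

package example

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
)

// echoHandler is a hypothetical service handler.
type echoHandler struct{}

// Echo returns ErrAbandonRequest when the caller's context is already
// canceled, so TSimpleServer closes the connection instead of writing a
// response nobody is waiting for.
func (h *echoHandler) Echo(ctx context.Context, msg string) (string, error) {
	select {
	case <-ctx.Done():
		return "", thrift.ErrAbandonRequest
	default:
	}
	return msg, nil
}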
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
index 453899651..ba2738a8d 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -20,6 +20,7 @@
package thrift
import (
+ "context"
"errors"
"io"
)
@@ -30,15 +31,18 @@ type Flusher interface {
Flush() (err error)
}
+type ContextFlusher interface {
+ Flush(ctx context.Context) (err error)
+}
+
type ReadSizeProvider interface {
RemainingBytes() (num_bytes uint64)
}
-
// Encapsulates the I/O layer
type TTransport interface {
io.ReadWriteCloser
- Flusher
+ ContextFlusher
ReadSizeProvider
// Opens the transport for communication
@@ -52,7 +56,6 @@ type stringWriter interface {
WriteString(s string) (n int, err error)
}
-
// This is "enchanced" transport with extra capabilities. You need to use one of these
// to construct protocol.
// Notably, TSocket does not implement this interface, and it is always a mistake to use
@@ -62,7 +65,6 @@ type TRichTransport interface {
io.ByteReader
io.ByteWriter
stringWriter
- Flusher
+ ContextFlusher
ReadSizeProvider
}
-
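Since TTransport and TRichTransport now require the context-aware ContextFlusher instead of Flusher, a transport that still exposes the old Flush() signature needs a shim. The adapter below is a hypothetical illustration, not part of the upstream code.

package example

import "context"

// legacyFlusher mirrors the old context-free Flusher shape.
type legacyFlusher interface {
	Flush() error
}

// flushAdapter lets a legacy flusher satisfy the new ContextFlusher
// requirement by checking the context for cancellation and then delegating.
type flushAdapter struct {
	inner legacyFlusher
}

func (a flushAdapter) Flush(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.inner.Flush()
}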
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
index 9505b4461..0a3f07646 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -46,6 +46,13 @@ const (
type tTransportException struct {
typeId int
err error
+ msg string
+}
+
+var _ TTransportException = (*tTransportException)(nil)
+
+func (tTransportException) TExceptionType() TExceptionType {
+ return TExceptionTypeTransport
}
func (p *tTransportException) TypeId() int {
@@ -53,15 +60,27 @@ func (p *tTransportException) TypeId() int {
}
func (p *tTransportException) Error() string {
- return p.err.Error()
+ return p.msg
}
func (p *tTransportException) Err() error {
return p.err
}
+func (p *tTransportException) Unwrap() error {
+ return p.err
+}
+
+func (p *tTransportException) Timeout() bool {
+ return p.typeId == TIMED_OUT
+}
+
func NewTTransportException(t int, e string) TTransportException {
- return &tTransportException{typeId: t, err: errors.New(e)}
+ return &tTransportException{
+ typeId: t,
+ err: errors.New(e),
+ msg: e,
+ }
}
func NewTTransportExceptionFromError(e error) TTransportException {
@@ -73,18 +92,40 @@ func NewTTransportExceptionFromError(e error) TTransportException {
return t
}
- switch v := e.(type) {
- case TTransportException:
- return v
- case timeoutable:
- if v.Timeout() {
- return &tTransportException{typeId: TIMED_OUT, err: e}
- }
+ te := &tTransportException{
+ typeId: UNKNOWN_TRANSPORT_EXCEPTION,
+ err: e,
+ msg: e.Error(),
}
- if e == io.EOF {
- return &tTransportException{typeId: END_OF_FILE, err: e}
+ if isTimeoutError(e) {
+ te.typeId = TIMED_OUT
+ return te
}
- return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+ if errors.Is(e, io.EOF) {
+ te.typeId = END_OF_FILE
+ return te
+ }
+
+ return te
+}
+
+func prependTTransportException(prepend string, e TTransportException) TTransportException {
+ return &tTransportException{
+ typeId: e.TypeId(),
+ err: e,
+ msg: prepend + e.Error(),
+ }
+}
+
+// isTimeoutError returns true when err is an error caused by timeout.
+//
+// Note that this also includes TTransportException wrapped timeout errors.
+func isTimeoutError(err error) bool {
+ var t timeoutable
+ if errors.As(err, &t) {
+ return t.Timeout()
+ }
+ return false
}
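A small usage sketch of the reworked constructor, assuming the exported type-id constants keep their upstream names (END_OF_FILE, TIMED_OUT): it shows both the classification and the fact that the new Unwrap method keeps errors.Is working through the wrapper.

package example

import (
	"errors"
	"fmt"
	"io"

	"github.com/uber/jaeger-client-go/thrift"
)

func classify(err error) {
	te := thrift.NewTTransportExceptionFromError(err)
	switch te.TypeId() {
	case thrift.END_OF_FILE:
		fmt.Println("peer closed the connection")
	case thrift.TIMED_OUT:
		fmt.Println("operation timed out")
	default:
		fmt.Println("other transport error:", te)
	}
	// Unwrap (added above) lets errors.Is see the original sentinel.
	fmt.Println("wraps io.EOF:", errors.Is(te, io.EOF))
}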
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
index 533d1b437..c80580794 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -24,14 +24,14 @@ package thrift
// a ServerTransport and then may want to mutate them (i.e. create
// a BufferedTransport from the underlying base transport)
type TTransportFactory interface {
- GetTransport(trans TTransport) TTransport
+ GetTransport(trans TTransport) (TTransport, error)
}
type tTransportFactory struct{}
// Return a wrapped instance of the base Transport.
-func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
- return trans
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+ return trans, nil
}
func NewTTransportFactory() TTransportFactory {
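With GetTransport now returning (TTransport, error), wrapping factories can propagate construction failures instead of hiding them. A hypothetical delegating factory under the new signature might look like this.

package example

import "github.com/uber/jaeger-client-go/thrift"

// passthroughFactory is a hypothetical factory that delegates to another
// factory and forwards any wrapping error under the new signature.
type passthroughFactory struct {
	delegate thrift.TTransportFactory
}

func (f passthroughFactory) GetTransport(trans thrift.TTransport) (thrift.TTransport, error) {
	wrapped, err := f.delegate.GetTransport(trans)
	if err != nil {
		return nil, err
	}
	// A real implementation would layer buffering, framing, etc. here;
	// this sketch simply passes the transport through.
	return wrapped, nil
}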
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
index 2352643ce..4c59ae9dd 100644
--- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -15,6 +15,7 @@
package utils
import (
+ "context"
"errors"
"fmt"
"io"
@@ -124,15 +125,14 @@ func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, err
}
// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
-func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error {
+func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error {
return errors.New("Not implemented")
}
// EmitBatch implements EmitBatch() of Agent interface
-func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
+func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error {
a.thriftBuffer.Reset()
- a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages
- if err := a.client.EmitBatch(batch); err != nil {
+ if err := a.client.EmitBatch(ctx, batch); err != nil {
return err
}
if a.thriftBuffer.Len() > a.maxPacketSize {
diff --git a/vendor/github.com/willf/bitset/go.mod b/vendor/github.com/willf/bitset/go.mod
deleted file mode 100644
index 583ecab78..000000000
--- a/vendor/github.com/willf/bitset/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/willf/bitset
-
-go 1.14
diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore
index 3bcd8cbaf..18312f004 100644
--- a/vendor/go.etcd.io/bbolt/.gitignore
+++ b/vendor/go.etcd.io/bbolt/.gitignore
@@ -3,3 +3,5 @@
*.swp
/bin/
cover.out
+/.idea
+*.iml
diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml
index 257dfdfee..452601e49 100644
--- a/vendor/go.etcd.io/bbolt/.travis.yml
+++ b/vendor/go.etcd.io/bbolt/.travis.yml
@@ -4,9 +4,10 @@ go_import_path: go.etcd.io/bbolt
sudo: false
go:
-- 1.12
+- 1.15
before_install:
+- go get -v golang.org/x/sys/unix
- go get -v honnef.co/go/tools/...
- go get -v github.com/kisielk/errcheck
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
index 2968aaa61..21ecf48f6 100644
--- a/vendor/go.etcd.io/bbolt/Makefile
+++ b/vendor/go.etcd.io/bbolt/Makefile
@@ -2,8 +2,6 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
-default: build
-
race:
@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
@echo "array freelist test"
diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md
index c9e64b1a6..f1b4a7b2b 100644
--- a/vendor/go.etcd.io/bbolt/README.md
+++ b/vendor/go.etcd.io/bbolt/README.md
@@ -908,12 +908,14 @@ Below is a list of public, open source projects that use Bolt:
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that can run on Windows, Linux, and Android.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
@@ -938,9 +940,8 @@ Below is a list of public, open source projects that use Bolt:
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
index 2938fed58..4e5f65ccc 100644
--- a/vendor/go.etcd.io/bbolt/bolt_unix.go
+++ b/vendor/go.etcd.io/bbolt/bolt_unix.go
@@ -7,6 +7,8 @@ import (
"syscall"
"time"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
@@ -49,13 +51,13 @@ func funlock(db *DB) error {
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
- b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
- err = madvise(b, syscall.MADV_RANDOM)
+ err = unix.Madvise(b, syscall.MADV_RANDOM)
if err != nil && err != syscall.ENOSYS {
// Ignore not implemented error in kernel because it still works.
return fmt.Errorf("madvise: %s", err)
@@ -76,18 +78,9 @@ func munmap(db *DB) error {
}
// Unmap using the original byte slice.
- err := syscall.Munmap(db.dataref)
+ err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = e1
- }
- return
-}
diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go
new file mode 100644
index 000000000..e4fe91b04
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/compact.go
@@ -0,0 +1,114 @@
+package bbolt
+
+// Compact will create a copy of the source DB in the destination DB. This may
+// reclaim space that the source database no longer has use for. txMaxSize can be
+// used to limit the transaction size of this process and may trigger intermittent
+// commits. A value of zero will ignore transaction sizes.
+// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
+func Compact(dst, src *DB, txMaxSize int64) error {
+ // commit regularly, or we'll run out of memory for large datasets if using one transaction.
+ var size int64
+ tx, err := dst.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
+ // On each key/value, check if we have exceeded tx size.
+ sz := int64(len(k) + len(v))
+ if size+sz > txMaxSize && txMaxSize != 0 {
+ // Commit previous transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ // Start new transaction.
+ tx, err = dst.Begin(true)
+ if err != nil {
+ return err
+ }
+ size = 0
+ }
+ size += sz
+
+ // Create bucket on the root transaction if this is the first level.
+ nk := len(keys)
+ if nk == 0 {
+ bkt, err := tx.CreateBucket(k)
+ if err != nil {
+ return err
+ }
+ if err := bkt.SetSequence(seq); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Create buckets on subsequent levels, if necessary.
+ b := tx.Bucket(keys[0])
+ if nk > 1 {
+ for _, k := range keys[1:] {
+ b = b.Bucket(k)
+ }
+ }
+
+ // Fill the entire page for best compaction.
+ b.FillPercent = 1.0
+
+ // If there is no value then this is a bucket call.
+ if v == nil {
+ bkt, err := b.CreateBucket(k)
+ if err != nil {
+ return err
+ }
+ if err := bkt.SetSequence(seq); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Otherwise treat it as a key/value pair.
+ return b.Put(k, v)
+ }); err != nil {
+ return err
+ }
+
+ return tx.Commit()
+}
+
+// walkFunc is the type of the function called for keys (buckets and "normal"
+// values) discovered by Walk. keys is the list of keys to descend to the bucket
+// owning the discovered key/value pair k/v.
+type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
+
+// walk walks recursively the bolt database db, calling walkFn for each key it finds.
+func walk(db *DB, walkFn walkFunc) error {
+ return db.View(func(tx *Tx) error {
+ return tx.ForEach(func(name []byte, b *Bucket) error {
+ return walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
+ })
+ })
+}
+
+func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
+ // Execute callback.
+ if err := fn(keypath, k, v, seq); err != nil {
+ return err
+ }
+
+ // If this is not a bucket then stop.
+ if v != nil {
+ return nil
+ }
+
+ // Iterate over each child key/value.
+ keypath = append(keypath, k)
+ return b.ForEach(func(k, v []byte) error {
+ if v == nil {
+ bkt := b.Bucket(k)
+ return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
+ }
+ return walkBucket(b, keypath, k, v, b.Sequence(), fn)
+ })
+}
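Usage of the new Compact helper is straightforward; the sketch below copies src.db into a fresh dst.db, committing roughly every 64 KiB of key/value data. File names are placeholders.

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open the source read-only and the destination read-write.
	src, err := bolt.Open("src.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := bolt.Open("dst.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Commit intermittently once ~64 KiB of data has been copied.
	if err := bolt.Compact(dst, src, 64*1024); err != nil {
		log.Fatal(err)
	}
}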
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
index 80b0095cc..a798c390a 100644
--- a/vendor/go.etcd.io/bbolt/db.go
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -120,6 +120,12 @@ type DB struct {
// of truncate() and fsync() when growing the data file.
AllocSize int
+ // Mlock locks database file in memory when set to true.
+ // It prevents major page faults; however, used memory can't be reclaimed.
+ //
+ // Supported only on Unix via mlock/munlock syscalls.
+ Mlock bool
+
path string
openFile func(string, int, os.FileMode) (*os.File, error)
file *os.File
@@ -188,6 +194,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
db.MmapFlags = options.MmapFlags
db.NoFreelistSync = options.NoFreelistSync
db.FreelistType = options.FreelistType
+ db.Mlock = options.Mlock
// Set default values for later DB operations.
db.MaxBatchSize = DefaultMaxBatchSize
@@ -337,7 +344,8 @@ func (db *DB) mmap(minsz int) error {
}
// Ensure the size is at least the minimum size.
- var size = int(info.Size())
+ fileSize := int(info.Size())
+ var size = fileSize
if size < minsz {
size = minsz
}
@@ -346,6 +354,13 @@ func (db *DB) mmap(minsz int) error {
return err
}
+ if db.Mlock {
+ // Unlock db memory
+ if err := db.munlock(fileSize); err != nil {
+ return err
+ }
+ }
+
// Dereference all mmap references before unmapping.
if db.rwtx != nil {
db.rwtx.root.dereference()
@@ -361,6 +376,13 @@ func (db *DB) mmap(minsz int) error {
return err
}
+ if db.Mlock {
+ // Don't allow swapping of data file
+ if err := db.mlock(fileSize); err != nil {
+ return err
+ }
+ }
+
// Save references to the meta pages.
db.meta0 = db.page(0).meta()
db.meta1 = db.page(1).meta()
@@ -422,12 +444,36 @@ func (db *DB) mmapSize(size int) (int, error) {
return int(sz), nil
}
+func (db *DB) munlock(fileSize int) error {
+ if err := munlock(db, fileSize); err != nil {
+ return fmt.Errorf("munlock error: " + err.Error())
+ }
+ return nil
+}
+
+func (db *DB) mlock(fileSize int) error {
+ if err := mlock(db, fileSize); err != nil {
+ return fmt.Errorf("mlock error: " + err.Error())
+ }
+ return nil
+}
+
+func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error {
+ if err := db.munlock(fileSizeFrom); err != nil {
+ return err
+ }
+ if err := db.mlock(fileSizeTo); err != nil {
+ return err
+ }
+ return nil
+}
+
// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
// Create two meta pages on a buffer.
buf := make([]byte, db.pageSize*4)
for i := 0; i < 2; i++ {
- p := db.pageInBuffer(buf[:], pgid(i))
+ p := db.pageInBuffer(buf, pgid(i))
p.id = pgid(i)
p.flags = metaPageFlag
@@ -444,13 +490,13 @@ func (db *DB) init() error {
}
// Write an empty freelist at page 3.
- p := db.pageInBuffer(buf[:], pgid(2))
+ p := db.pageInBuffer(buf, pgid(2))
p.id = pgid(2)
p.flags = freelistPageFlag
p.count = 0
// Write an empty leaf page at page 4.
- p = db.pageInBuffer(buf[:], pgid(3))
+ p = db.pageInBuffer(buf, pgid(3))
p.id = pgid(3)
p.flags = leafPageFlag
p.count = 0
@@ -462,6 +508,7 @@ func (db *DB) init() error {
if err := fdatasync(db); err != nil {
return err
}
+ db.filesz = len(buf)
return nil
}
@@ -973,6 +1020,12 @@ func (db *DB) grow(sz int) error {
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
}
+ if db.Mlock {
+ // unlock old file and lock new one
+ if err := db.mrelock(db.filesz, sz); err != nil {
+ return fmt.Errorf("mlock/munlock error: %s", err)
+ }
+ }
}
db.filesz = sz
@@ -1064,6 +1117,11 @@ type Options struct {
// OpenFile is used to open files. It defaults to os.OpenFile. This option
// is useful for writing hermetic tests.
OpenFile func(string, int, os.FileMode) (*os.File, error)
+
+ // Mlock locks database file in memory when set to true.
+ // It prevents potential page faults; however,
+ // used memory can't be reclaimed. (UNIX only)
+ Mlock bool
}
// DefaultOptions represent the options used if nil options are passed into Open().
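The new Mlock option is opt-in at Open time; a minimal sketch (Unix only, file name is a placeholder):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Mlock pins the mmapped data file in RAM, trading reclaimable memory
	// for the absence of major page faults on reads.
	db, err := bolt.Open("pinned.db", 0600, &bolt.Options{Mlock: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}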
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
index 02ef2be04..dbd67a1e7 100644
--- a/vendor/go.etcd.io/bbolt/freelist_hmap.go
+++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go
@@ -4,7 +4,7 @@ import "sort"
// hashmapFreeCount returns count of free pages(hashmap version)
func (f *freelist) hashmapFreeCount() int {
- // use the forwardmap to get the total count
+ // use the forwardMap to get the total count
count := 0
for _, size := range f.forwardMap {
count += int(size)
@@ -41,7 +41,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
for pid := range bm {
// remove the initial
- f.delSpan(pid, uint64(size))
+ f.delSpan(pid, size)
f.allocs[pid] = txid
@@ -51,7 +51,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
f.addSpan(pid+pgid(n), remain)
for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+pgid(i))
+ delete(f.cache, pid+i)
}
return pid
}
diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod
index c2366daef..96355a69b 100644
--- a/vendor/go.etcd.io/bbolt/go.mod
+++ b/vendor/go.etcd.io/bbolt/go.mod
@@ -2,4 +2,4 @@ module go.etcd.io/bbolt
go 1.12
-require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
+require golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d
diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum
index 4ad15a488..c13f8f470 100644
--- a/vendor/go.etcd.io/bbolt/go.sum
+++ b/vendor/go.etcd.io/bbolt/go.sum
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go
new file mode 100644
index 000000000..6a6c7b353
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/mlock_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package bbolt
+
+import "golang.org/x/sys/unix"
+
+// mlock locks memory of db file
+func mlock(db *DB, fileSize int) error {
+ sizeToLock := fileSize
+ if sizeToLock > db.datasz {
+ // Can't lock more than mmaped slice
+ sizeToLock = db.datasz
+ }
+ if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
+ return err
+ }
+ return nil
+}
+
+// munlock unlocks memory of db file
+func munlock(db *DB, fileSize int) error {
+ if db.dataref == nil {
+ return nil
+ }
+
+ sizeToUnlock := fileSize
+ if sizeToUnlock > db.datasz {
+ // Can't unlock more than mmaped slice
+ sizeToUnlock = db.datasz
+ }
+
+ if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go
new file mode 100644
index 000000000..b4a36a493
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/mlock_windows.go
@@ -0,0 +1,11 @@
+package bbolt
+
+// mlock locks memory of db file
+func mlock(_ *DB, _ int) error {
+ panic("mlock is supported only on UNIX systems")
+}
+
+// munlock unlocks memory of db file
+func munlock(_ *DB, _ int) error {
+ panic("munlock is supported only on UNIX systems")
+}
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 4b1a64a8b..869d41200 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -188,7 +188,6 @@ func (tx *Tx) Commit() error {
}
// If strict mode is enabled then perform a consistency check.
- // Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
var errs []string
@@ -393,7 +392,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
return err
}
- err = tx.Copy(f)
+ _, err = tx.WriteTo(f)
if err != nil {
_ = f.Close()
return err
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c5d79a9b7..786096f45 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -36,6 +36,8 @@ github.com/VividCortex/ewma
github.com/acarl005/stripansi
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
+# github.com/bits-and-blooms/bitset v1.2.0
+github.com/bits-and-blooms/bitset
# github.com/blang/semver v3.5.1+incompatible
github.com/blang/semver
# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
@@ -91,7 +93,7 @@ github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/parse
github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/util
-# github.com/containers/common v0.38.4
+# github.com/containers/common v0.39.1-0.20210527140106-e5800a20386a
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
github.com/containers/common/pkg/apparmor
@@ -102,6 +104,7 @@ github.com/containers/common/pkg/cgroupv2
github.com/containers/common/pkg/chown
github.com/containers/common/pkg/completion
github.com/containers/common/pkg/config
+github.com/containers/common/pkg/defaultnet
github.com/containers/common/pkg/filters
github.com/containers/common/pkg/manifests
github.com/containers/common/pkg/parse
@@ -192,7 +195,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.31.2
+# github.com/containers/storage v1.32.1
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -277,7 +280,7 @@ github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
-# github.com/docker/docker v20.10.6+incompatible
+# github.com/docker/docker v20.10.7+incompatible
github.com/docker/docker/api
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
@@ -394,7 +397,7 @@ github.com/json-iterator/go
# github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
github.com/juju/ansiterm
github.com/juju/ansiterm/tabwriter
-# github.com/klauspost/compress v1.12.2
+# github.com/klauspost/compress v1.12.3
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -445,7 +448,7 @@ github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
-# github.com/onsi/ginkgo v1.16.2
+# github.com/onsi/ginkgo v1.16.4
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/extensions/table
@@ -475,13 +478,14 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.12.0
+# github.com/onsi/gomega v1.13.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
github.com/onsi/gomega/gexec
github.com/onsi/gomega/internal/assertion
github.com/onsi/gomega/internal/asyncassertion
+github.com/onsi/gomega/internal/defaults
github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/internal/testingtsupport
github.com/onsi/gomega/matchers
@@ -514,7 +518,7 @@ github.com/opencontainers/runtime-tools/generate
github.com/opencontainers/runtime-tools/generate/seccomp
github.com/opencontainers/runtime-tools/specerror
github.com/opencontainers/runtime-tools/validate
-# github.com/opencontainers/selinux v1.8.1
+# github.com/opencontainers/selinux v1.8.2
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
@@ -580,7 +584,7 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
-# github.com/uber/jaeger-client-go v2.28.0+incompatible
+# github.com/uber/jaeger-client-go v2.29.1+incompatible
github.com/uber/jaeger-client-go/log
github.com/uber/jaeger-client-go/thrift
github.com/uber/jaeger-client-go/thrift-gen/agent
@@ -606,15 +610,13 @@ github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
github.com/vishvananda/netns
-# github.com/willf/bitset v1.1.11
-github.com/willf/bitset
# github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
github.com/xeipuuv/gojsonschema
-# go.etcd.io/bbolt v1.3.5
+# go.etcd.io/bbolt v1.3.6
go.etcd.io/bbolt
# go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
go.mozilla.org/pkcs7
diff --git a/version/version.go b/version/version.go
index 1cbd9e309..71292305d 100644
--- a/version/version.go
+++ b/version/version.go
@@ -27,7 +27,7 @@ const (
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
-var Version = semver.MustParse("3.2.0-dev")
+var Version = semver.MustParse("3.3.0-dev")
// See https://docs.docker.com/engine/api/v1.40/
// libpod compat handlers are expected to honor docker API versions