-rw-r--r--.cirrus.yml50
-rw-r--r--Dockerfile.ubuntu2
-rw-r--r--Makefile28
-rw-r--r--README.md10
-rw-r--r--RELEASE_NOTES.md14
-rw-r--r--changelog.txt343
-rw-r--r--cmd/podman/auto-update.go20
-rw-r--r--cmd/podman/common/specgen.go28
-rw-r--r--cmd/podman/containers/attach.go4
-rw-r--r--cmd/podman/containers/checkpoint.go2
-rw-r--r--cmd/podman/containers/cleanup.go24
-rw-r--r--cmd/podman/containers/create.go7
-rw-r--r--cmd/podman/containers/exec.go35
-rw-r--r--cmd/podman/containers/port.go4
-rw-r--r--cmd/podman/containers/rm.go11
-rw-r--r--cmd/podman/containers/run.go4
-rw-r--r--cmd/podman/containers/start.go4
-rw-r--r--cmd/podman/containers/top.go20
-rw-r--r--cmd/podman/images/list.go14
-rw-r--r--cmd/podman/images/prune.go6
-rw-r--r--cmd/podman/images/sign.go55
-rw-r--r--cmd/podman/networks/create.go8
-rw-r--r--cmd/podman/networks/inspect.go30
-rw-r--r--cmd/podman/networks/list.go27
-rw-r--r--cmd/podman/networks/network.go2
-rw-r--r--cmd/podman/networks/rm.go2
-rw-r--r--cmd/podman/pods/create.go9
-rw-r--r--cmd/podman/pods/top.go20
-rw-r--r--cmd/podman/root.go17
-rw-r--r--cmd/podman/syslog_linux.go25
-rw-r--r--cmd/podman/syslog_unsupported.go17
-rw-r--r--cmd/podman/system/df.go2
-rw-r--r--cmd/podman/system/events.go4
-rw-r--r--cmd/podman/system/reset.go10
-rw-r--r--cmd/podman/system/service.go2
-rw-r--r--cmd/podman/system/version.go21
-rw-r--r--completions/bash/podman26
-rw-r--r--contrib/cirrus/container_test.sh1
-rwxr-xr-xcontrib/cirrus/integration_test.sh1
-rw-r--r--contrib/cirrus/lib.sh12
-rw-r--r--contrib/cirrus/packer/libpod_images.yml4
-rw-r--r--contrib/cirrus/packer/ubuntu_packaging.sh36
-rwxr-xr-xcontrib/cirrus/setup_environment.sh6
-rw-r--r--contrib/spec/podman.spec.in8
-rw-r--r--docs/source/markdown/links/podman-image-diff.11
-rw-r--r--docs/source/markdown/links/podman-image-search.11
-rw-r--r--docs/source/markdown/podman-auto-update.1.md16
-rw-r--r--docs/source/markdown/podman-container-cleanup.1.md7
-rw-r--r--docs/source/markdown/podman-events.1.md2
-rw-r--r--docs/source/markdown/podman-exec.1.md4
-rw-r--r--docs/source/markdown/podman-info.1.md294
-rw-r--r--docs/source/markdown/podman-network-inspect.1.md14
-rw-r--r--docs/source/markdown/podman-network-ls.1.md18
-rw-r--r--docs/source/markdown/podman-version.1.md13
-rw-r--r--docs/tutorials/podman-derivative-api.md18
-rw-r--r--go.mod35
-rw-r--r--go.sum117
-rwxr-xr-xhack/get_ci_vm.sh34
-rwxr-xr-xhack/podman-registry231
-rw-r--r--hack/podman-registry-go/registry.go98
-rw-r--r--hack/podman-registry-go/registry_test.go40
-rw-r--r--libpod.conf181
-rw-r--r--libpod/boltdb_state_internal.go8
-rw-r--r--libpod/container_api.go5
-rw-r--r--libpod/container_exec.go329
-rw-r--r--libpod/container_internal.go29
-rw-r--r--libpod/container_internal_linux.go2
-rw-r--r--libpod/container_internal_test.go2
-rw-r--r--libpod/define/config.go10
-rw-r--r--libpod/define/healthchecks.go36
-rw-r--r--libpod/define/version.go27
-rw-r--r--libpod/healthcheck.go65
-rw-r--r--libpod/image/filters.go3
-rw-r--r--libpod/oci.go26
-rw-r--r--libpod/oci_conmon_exec_linux.go599
-rw-r--r--libpod/oci_conmon_linux.go323
-rw-r--r--libpod/oci_missing.go12
-rw-r--r--libpod/options.go26
-rw-r--r--libpod/runtime_ctr.go21
-rw-r--r--libpod/util.go9
-rw-r--r--nix/default.nix2
-rw-r--r--pkg/api/handlers/compat/containers_attach.go49
-rw-r--r--pkg/api/handlers/compat/containers_start.go9
-rw-r--r--pkg/api/handlers/compat/containers_stop.go (renamed from pkg/api/handlers/compat/container_start.go)0
-rw-r--r--pkg/api/handlers/compat/exec.go72
-rw-r--r--pkg/api/handlers/compat/ping.go8
-rw-r--r--pkg/api/handlers/compat/resize.go68
-rw-r--r--pkg/api/handlers/compat/version.go42
-rw-r--r--pkg/api/handlers/handler.go6
-rw-r--r--pkg/api/handlers/libpod/containers_create.go3
-rw-r--r--pkg/api/handlers/libpod/healthcheck.go24
-rw-r--r--pkg/api/handlers/libpod/images.go19
-rw-r--r--pkg/api/handlers/libpod/networks.go68
-rw-r--r--pkg/api/handlers/libpod/swagger.go28
-rw-r--r--pkg/api/handlers/libpod/system.go23
-rw-r--r--pkg/api/handlers/types.go5
-rw-r--r--pkg/api/handlers/utils/errors.go8
-rw-r--r--pkg/api/handlers/utils/handler.go86
-rw-r--r--pkg/api/handlers/utils/handler_test.go139
-rw-r--r--pkg/api/handlers/utils/images.go33
-rw-r--r--pkg/api/server/register_containers.go6
-rw-r--r--pkg/api/server/register_exec.go24
-rw-r--r--pkg/api/server/register_images.go6
-rw-r--r--pkg/api/server/register_networks.go100
-rw-r--r--pkg/api/server/register_system.go31
-rw-r--r--pkg/api/server/server.go1
-rw-r--r--pkg/api/server/swagger.go27
-rw-r--r--pkg/api/tags.yaml6
-rw-r--r--pkg/autoupdate/autoupdate.go34
-rw-r--r--pkg/bindings/bindings.go13
-rw-r--r--pkg/bindings/connection.go90
-rw-r--r--pkg/bindings/containers/containers.go243
-rw-r--r--pkg/bindings/images/images.go20
-rw-r--r--pkg/bindings/images/rm.go2
-rw-r--r--pkg/bindings/network/network.go62
-rw-r--r--pkg/bindings/system/system.go40
-rw-r--r--pkg/bindings/test/attach_test.go110
-rw-r--r--pkg/bindings/test/common_test.go2
-rw-r--r--pkg/bindings/test/containers_test.go104
-rw-r--r--pkg/bindings/test/exec_test.go4
-rw-r--r--pkg/bindings/test/images_test.go10
-rw-r--r--pkg/bindings/test/pods_test.go6
-rw-r--r--pkg/bindings/test/system_test.go58
-rw-r--r--pkg/bindings/test/test_suite_test.go5
-rw-r--r--pkg/bindings/test/volumes_test.go2
-rw-r--r--pkg/bindings/version.go3
-rw-r--r--pkg/cgroups/cgroups.go4
-rw-r--r--pkg/domain/entities/auto-update.go6
-rw-r--r--pkg/domain/entities/containers.go4
-rw-r--r--pkg/domain/entities/engine_container.go5
-rw-r--r--pkg/domain/entities/engine_image.go1
-rw-r--r--pkg/domain/entities/engine_system.go2
-rw-r--r--pkg/domain/entities/images.go21
-rw-r--r--pkg/domain/entities/network.go3
-rw-r--r--pkg/domain/infra/abi/auto-update.go8
-rw-r--r--pkg/domain/infra/abi/containers.go85
-rw-r--r--pkg/domain/infra/abi/healthcheck.go7
-rw-r--r--pkg/domain/infra/abi/images.go154
-rw-r--r--pkg/domain/infra/abi/images_list.go24
-rw-r--r--pkg/domain/infra/abi/network.go102
-rw-r--r--pkg/domain/infra/abi/system.go2
-rw-r--r--pkg/domain/infra/abi/terminal/terminal_linux.go14
-rw-r--r--pkg/domain/infra/tunnel/auto-update.go2
-rw-r--r--pkg/domain/infra/tunnel/containers.go132
-rw-r--r--pkg/domain/infra/tunnel/helpers.go4
-rw-r--r--pkg/domain/infra/tunnel/images.go25
-rw-r--r--pkg/domain/infra/tunnel/manifest.go31
-rw-r--r--pkg/domain/infra/tunnel/network.go27
-rw-r--r--pkg/domain/infra/tunnel/runtime.go5
-rw-r--r--pkg/domain/infra/tunnel/system.go7
-rw-r--r--pkg/network/devices.go7
-rw-r--r--pkg/network/files.go31
-rw-r--r--pkg/network/network.go17
-rw-r--r--pkg/specgen/generate/container_create.go21
-rw-r--r--pkg/specgen/namespaces.go3
-rw-r--r--pkg/util/mountOpts_linux.go2
-rw-r--r--pkg/varlinkapi/containers.go8
-rw-r--r--pkg/varlinkapi/system.go2
-rw-r--r--pkg/varlinkapi/transfers.go2
-rw-r--r--test/apiv2/01-basic.at14
-rw-r--r--test/e2e/common_test.go22
-rw-r--r--test/e2e/config.go4
-rw-r--r--test/e2e/container_inspect_test.go2
-rw-r--r--test/e2e/create_test.go24
-rw-r--r--test/e2e/events_test.go16
-rw-r--r--test/e2e/exec_test.go28
-rw-r--r--test/e2e/export_test.go2
-rw-r--r--test/e2e/healthcheck_run_test.go1
-rw-r--r--test/e2e/images_test.go39
-rw-r--r--test/e2e/info_test.go1
-rw-r--r--test/e2e/init_test.go5
-rw-r--r--test/e2e/inspect_test.go10
-rw-r--r--test/e2e/kill_test.go1
-rw-r--r--test/e2e/libpod_suite_remote_test.go214
-rw-r--r--test/e2e/libpod_suite_test.go6
-rw-r--r--test/e2e/libpod_suite_varlink_test.go (renamed from test/e2e/libpod_suite_remoteclient_test.go)29
-rw-r--r--test/e2e/logs_test.go19
-rw-r--r--test/e2e/manifest_test.go3
-rw-r--r--test/e2e/network_test.go39
-rw-r--r--test/e2e/pod_create_test.go26
-rw-r--r--test/e2e/pod_infra_container_test.go2
-rw-r--r--test/e2e/pod_inspect_test.go2
-rw-r--r--test/e2e/pod_kill_test.go3
-rw-r--r--test/e2e/pod_pause_test.go2
-rw-r--r--test/e2e/pod_pod_namespaces.go2
-rw-r--r--test/e2e/pod_prune_test.go2
-rw-r--r--test/e2e/pod_ps_test.go3
-rw-r--r--test/e2e/pod_restart_test.go3
-rw-r--r--test/e2e/pod_rm_test.go3
-rw-r--r--test/e2e/pod_start_test.go3
-rw-r--r--test/e2e/pod_stats_test.go2
-rw-r--r--test/e2e/pod_stop_test.go3
-rw-r--r--test/e2e/pod_top_test.go2
-rw-r--r--test/e2e/prune_test.go28
-rw-r--r--test/e2e/ps_test.go1
-rw-r--r--test/e2e/restart_test.go1
-rw-r--r--test/e2e/rm_test.go1
-rw-r--r--test/e2e/rmi_test.go2
-rw-r--r--test/e2e/run_networking_test.go5
-rw-r--r--test/e2e/run_volume_test.go2
-rw-r--r--test/e2e/start_test.go6
-rw-r--r--test/e2e/stop_test.go1
-rw-r--r--test/e2e/system_reset_test.go3
-rw-r--r--test/e2e/untag_test.go2
-rw-r--r--test/endpoint/endpoint.go2
-rw-r--r--test/system/001-basic.bats2
-rw-r--r--test/system/030-run.bats4
-rw-r--r--test/system/160-volumes.bats244
-rw-r--r--test/system/200-pod-top.bats40
-rw-r--r--test/system/200-pod.bats174
-rw-r--r--test/system/250-systemd.bats7
-rw-r--r--test/utils/utils.go11
-rw-r--r--troubleshooting.md34
-rw-r--r--vendor/github.com/Microsoft/go-winio/vhd/vhd.go151
-rw-r--r--vendor/github.com/Microsoft/go-winio/vhd/zvhd.go99
-rw-r--r--vendor/github.com/Microsoft/hcsshim/CODEOWNERS3
-rw-r--r--vendor/github.com/Microsoft/hcsshim/README.md7
-rw-r--r--vendor/github.com/Microsoft/hcsshim/appveyor.yml20
-rw-r--r--vendor/github.com/Microsoft/hcsshim/go.mod28
-rw-r--r--vendor/github.com/Microsoft/hcsshim/go.sum64
-rw-r--r--vendor/github.com/Microsoft/hcsshim/hnspolicy.go3
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go7
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/syscall.go5
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go50
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go28
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go54
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go21
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go10
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go7
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go4
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go19
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go16
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go18
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go17
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go26
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go28
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go31
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go60
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go29
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go22
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go26
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go60
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go25
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go13
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go5
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go31
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go27
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go32
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/layer.go41
-rw-r--r--vendor/github.com/blang/semver/.travis.yml21
-rw-r--r--vendor/github.com/blang/semver/README.md5
-rw-r--r--vendor/github.com/blang/semver/package.json17
-rw-r--r--vendor/github.com/blang/semver/range.go200
-rw-r--r--vendor/github.com/blang/semver/semver.go23
-rw-r--r--vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go13
-rw-r--r--vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go15
-rw-r--r--vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go80
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go11
-rw-r--r--vendor/github.com/containers/image/v5/copy/copy.go11
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_client.go6
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image.go4
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image_dest.go53
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image_src.go39
-rw-r--r--vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go91
-rw-r--r--vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go61
-rw-r--r--vendor/github.com/containers/image/v5/manifest/common.go118
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema2.go97
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go2
-rw-r--r--vendor/github.com/containers/image/v5/manifest/list.go24
-rw-r--r--vendor/github.com/containers/image/v5/manifest/manifest.go10
-rw-r--r--vendor/github.com/containers/image/v5/manifest/oci.go106
-rw-r--r--vendor/github.com/containers/image/v5/manifest/oci_index.go5
-rw-r--r--vendor/github.com/containers/image/v5/version/version.go2
-rw-r--r--vendor/github.com/containers/storage/.cirrus.yml19
-rw-r--r--vendor/github.com/containers/storage/SECURITY.md3
-rw-r--r--vendor/github.com/containers/storage/VERSION2
-rw-r--r--vendor/github.com/containers/storage/drivers/devmapper/deviceset.go6
-rw-r--r--vendor/github.com/containers/storage/drivers/devmapper/driver.go4
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/check.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go43
-rw-r--r--vendor/github.com/containers/storage/drivers/vfs/driver.go16
-rw-r--r--vendor/github.com/containers/storage/go.mod10
-rw-r--r--vendor/github.com/containers/storage/go.sum64
-rw-r--r--vendor/github.com/containers/storage/pkg/fileutils/fileutils.go28
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go18
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/ioutils/fswriters.go48
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare.c114
-rw-r--r--vendor/github.com/containers/storage/store.go6
-rw-r--r--vendor/github.com/containers/storage/userns.go2
-rw-r--r--vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go11
-rw-r--r--vendor/github.com/golang/protobuf/proto/buffer.go324
-rw-r--r--vendor/github.com/golang/protobuf/proto/clone.go253
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode.go427
-rw-r--r--vendor/github.com/golang/protobuf/proto/defaults.go63
-rw-r--r--vendor/github.com/golang/protobuf/proto/deprecated.go126
-rw-r--r--vendor/github.com/golang/protobuf/proto/discard.go356
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode.go203
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal.go301
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go771
-rw-r--r--vendor/github.com/golang/protobuf/proto/lib.go965
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set.go181
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_reflect.go360
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_unsafe.go313
-rw-r--r--vendor/github.com/golang/protobuf/proto/properties.go648
-rw-r--r--vendor/github.com/golang/protobuf/proto/proto.go167
-rw-r--r--vendor/github.com/golang/protobuf/proto/registry.go323
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_marshal.go2776
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_merge.go654
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_unmarshal.go2053
-rw-r--r--vendor/github.com/golang/protobuf/proto/text.go843
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_decode.go801
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_encode.go560
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser.go880
-rw-r--r--vendor/github.com/golang/protobuf/proto/wire.go78
-rw-r--r--vendor/github.com/golang/protobuf/proto/wrappers.go34
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any.go214
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.pb.go230
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.proto154
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/doc.go37
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration.go116
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go192
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.proto117
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp.go109
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go211
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto135
-rw-r--r--vendor/github.com/klauspost/pgzip/LICENSE3
-rw-r--r--vendor/github.com/klauspost/pgzip/gzip.go2
-rw-r--r--vendor/github.com/nxadm/tail/.gitignore2
-rw-r--r--vendor/github.com/nxadm/tail/.travis.yml16
-rw-r--r--vendor/github.com/nxadm/tail/CHANGES.md46
-rw-r--r--vendor/github.com/nxadm/tail/Dockerfile19
-rw-r--r--vendor/github.com/nxadm/tail/LICENSE21
-rw-r--r--vendor/github.com/nxadm/tail/README.md36
-rw-r--r--vendor/github.com/nxadm/tail/appveyor.yml11
-rw-r--r--vendor/github.com/nxadm/tail/go.mod9
-rw-r--r--vendor/github.com/nxadm/tail/go.sum6
-rw-r--r--vendor/github.com/nxadm/tail/ratelimiter/Licence7
-rw-r--r--vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go97
-rw-r--r--vendor/github.com/nxadm/tail/ratelimiter/memory.go60
-rw-r--r--vendor/github.com/nxadm/tail/ratelimiter/storage.go6
-rw-r--r--vendor/github.com/nxadm/tail/tail.go440
-rw-r--r--vendor/github.com/nxadm/tail/tail_posix.go11
-rw-r--r--vendor/github.com/nxadm/tail/tail_windows.go12
-rw-r--r--vendor/github.com/nxadm/tail/util/util.go48
-rw-r--r--vendor/github.com/nxadm/tail/watch/filechanges.go36
-rw-r--r--vendor/github.com/nxadm/tail/watch/inotify.go135
-rw-r--r--vendor/github.com/nxadm/tail/watch/inotify_tracker.go248
-rw-r--r--vendor/github.com/nxadm/tail/watch/polling.go118
-rw-r--r--vendor/github.com/nxadm/tail/watch/watch.go20
-rw-r--r--vendor/github.com/nxadm/tail/winfile/winfile.go92
-rw-r--r--vendor/github.com/onsi/ginkgo/.travis.yml2
-rw-r--r--vendor/github.com/onsi/ginkgo/CHANGELOG.md26
-rw-r--r--vendor/github.com/onsi/ginkgo/README.md52
-rw-r--r--vendor/github.com/onsi/ginkgo/config/config.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/build_command.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go8
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/run_command.go15
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go8
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go173
-rw-r--r--vendor/github.com/onsi/ginkgo/go.mod10
-rw-r--r--vendor/github.com/onsi/ginkgo/go.sum41
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go6
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go12
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/suite/suite.go3
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/default_reporter.go6
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go12
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go12
-rw-r--r--vendor/github.com/onsi/gomega/CHANGELOG.md5
-rw-r--r--vendor/github.com/onsi/gomega/go.mod18
-rw-r--r--vendor/github.com/onsi/gomega/go.sum38
-rw-r--r--vendor/github.com/onsi/gomega/gomega_dsl.go2
-rw-r--r--vendor/github.com/opencontainers/go-digest/.mailmap3
-rw-r--r--vendor/github.com/opencontainers/go-digest/.pullapprove.yml38
-rw-r--r--vendor/github.com/opencontainers/go-digest/.travis.yml3
-rw-r--r--vendor/github.com/opencontainers/go-digest/LICENSE (renamed from vendor/github.com/opencontainers/go-digest/LICENSE.code)1
-rw-r--r--vendor/github.com/opencontainers/go-digest/MAINTAINERS12
-rw-r--r--vendor/github.com/opencontainers/go-digest/README.md70
-rw-r--r--vendor/github.com/opencontainers/go-digest/algorithm.go1
-rw-r--r--vendor/github.com/opencontainers/go-digest/digest.go1
-rw-r--r--vendor/github.com/opencontainers/go-digest/digester.go1
-rw-r--r--vendor/github.com/opencontainers/go-digest/doc.go10
-rw-r--r--vendor/github.com/opencontainers/go-digest/go.mod3
-rw-r--r--vendor/github.com/opencontainers/go-digest/verifiers.go1
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/specs-go/config.go55
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/specs-go/version.go2
-rw-r--r--vendor/github.com/seccomp/containers-golang/.gitignore4
-rw-r--r--vendor/github.com/seccomp/containers-golang/LICENSE190
-rw-r--r--vendor/github.com/seccomp/containers-golang/Makefile22
-rw-r--r--vendor/github.com/seccomp/containers-golang/README.md12
-rw-r--r--vendor/github.com/seccomp/containers-golang/go.mod16
-rw-r--r--vendor/github.com/seccomp/containers-golang/go.sum48
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp.json45
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go36
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_linux.go46
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go19
-rw-r--r--vendor/github.com/seccomp/containers-golang/types.go5
-rw-r--r--vendor/github.com/seccomp/containers-golang/vendor.conf9
-rw-r--r--vendor/github.com/vbauerster/mpb/v5/bar.go12
-rw-r--r--vendor/github.com/vbauerster/mpb/v5/bar_filler.go4
-rw-r--r--vendor/github.com/vbauerster/mpb/v5/go.mod4
-rw-r--r--vendor/github.com/vbauerster/mpb/v5/go.sum8
-rw-r--r--vendor/github.com/vbauerster/mpb/v5/internal/percentage.go3
-rw-r--r--vendor/golang.org/x/crypto/chacha20/chacha_generic.go119
-rw-r--r--vendor/golang.org/x/crypto/chacha20/xor.go17
-rw-r--r--vendor/golang.org/x/crypto/poly1305/mac_noasm.go2
-rw-r--r--vendor/golang.org/x/crypto/poly1305/poly1305.go22
-rw-r--r--vendor/golang.org/x/crypto/poly1305/sum_amd64.go11
-rw-r--r--vendor/golang.org/x/crypto/poly1305/sum_generic.go18
-rw-r--r--vendor/golang.org/x/crypto/poly1305/sum_noasm.go11
-rw-r--r--vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go11
-rw-r--r--vendor/golang.org/x/crypto/ssh/cipher.go2
-rw-r--r--vendor/golang.org/x/crypto/ssh/terminal/terminal.go8
-rw-r--r--vendor/golang.org/x/net/http2/client_conn_pool.go8
-rw-r--r--vendor/golang.org/x/net/http2/flow.go2
-rw-r--r--vendor/golang.org/x/net/http2/hpack/huffman.go7
-rw-r--r--vendor/golang.org/x/net/http2/http2.go7
-rw-r--r--vendor/golang.org/x/net/http2/server.go8
-rw-r--r--vendor/golang.org/x/net/http2/transport.go12
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_aix.go (renamed from vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go)2
-rw-r--r--vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go27
-rw-r--r--vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go30
-rw-r--r--vendor/golang.org/x/sys/unix/README.md4
-rw-r--r--vendor/golang.org/x/sys/unix/mkerrors.sh4
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go21
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin_386.go11
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go11
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin_arm.go11
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go11
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_arm64.go28
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_unix.go17
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux.go23
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go22
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go32
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go22
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go32
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go22
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go32
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go22
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go32
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux.go31
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go3
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go1
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux.go39
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_386.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go1
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go1
-rw-r--r--vendor/golang.org/x/sys/windows/dll_windows.go29
-rw-r--r--vendor/golang.org/x/sys/windows/env_windows.go11
-rw-r--r--vendor/golang.org/x/sys/windows/security_windows.go20
-rw-r--r--vendor/golang.org/x/sys/windows/syscall_windows.go37
-rw-r--r--vendor/google.golang.org/protobuf/AUTHORS3
-rw-r--r--vendor/google.golang.org/protobuf/CONTRIBUTORS3
-rw-r--r--vendor/google.golang.org/protobuf/LICENSE27
-rw-r--r--vendor/google.golang.org/protobuf/PATENTS22
-rw-r--r--vendor/google.golang.org/protobuf/encoding/prototext/decode.go789
-rw-r--r--vendor/google.golang.org/protobuf/encoding/prototext/doc.go7
-rw-r--r--vendor/google.golang.org/protobuf/encoding/prototext/encode.go426
-rw-r--r--vendor/google.golang.org/protobuf/encoding/protowire/wire.go538
-rw-r--r--vendor/google.golang.org/protobuf/internal/descfmt/stringer.go316
-rw-r--r--vendor/google.golang.org/protobuf/internal/descopts/options.go29
-rw-r--r--vendor/google.golang.org/protobuf/internal/detrand/rand.go61
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/defval/default.go213
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go258
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go207
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode.go665
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go190
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go161
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go373
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/doc.go29
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/encode.go267
-rw-r--r--vendor/google.golang.org/protobuf/internal/errors/errors.go89
-rw-r--r--vendor/google.golang.org/protobuf/internal/errors/is_go112.go39
-rw-r--r--vendor/google.golang.org/protobuf/internal/errors/is_go113.go12
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go13
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go35
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go240
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/doc.go7
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go13
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go10
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go12
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go12
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go33
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go13
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go53
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go52
-rw-r--r--vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go40
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/build.go155
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc.go613
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go471
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go704
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go286
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go345
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go107
-rw-r--r--vendor/google.golang.org/protobuf/internal/filetype/build.go297
-rw-r--r--vendor/google.golang.org/protobuf/internal/flags/flags.go24
-rw-r--r--vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go9
-rw-r--r--vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go9
-rw-r--r--vendor/google.golang.org/protobuf/internal/genname/name.go25
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/api_export.go170
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/checkinit.go141
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_extension.go223
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_field.go828
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_gen.go5637
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_map.go388
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go37
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go11
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_message.go159
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go120
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go209
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_tables.go557
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go17
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert.go467
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert_list.go141
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert_map.go121
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/decode.go274
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/encode.go199
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/enum.go21
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/extension.go156
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go219
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_export.go92
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go175
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_file.go81
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_message.go502
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/merge.go176
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/merge_gen.go209
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message.go215
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message_reflect.go364
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go466
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go249
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go177
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go173
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/validate.go575
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/weak.go74
-rw-r--r--vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go43
-rw-r--r--vendor/google.golang.org/protobuf/internal/pragma/pragma.go29
-rw-r--r--vendor/google.golang.org/protobuf/internal/set/ints.go58
-rw-r--r--vendor/google.golang.org/protobuf/internal/strs/strings.go196
-rw-r--r--vendor/google.golang.org/protobuf/internal/strs/strings_pure.go27
-rw-r--r--vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go94
-rw-r--r--vendor/google.golang.org/protobuf/internal/version/version.go79
-rw-r--r--vendor/google.golang.org/protobuf/proto/checkinit.go71
-rw-r--r--vendor/google.golang.org/protobuf/proto/decode.go270
-rw-r--r--vendor/google.golang.org/protobuf/proto/decode_gen.go603
-rw-r--r--vendor/google.golang.org/protobuf/proto/doc.go94
-rw-r--r--vendor/google.golang.org/protobuf/proto/encode.go343
-rw-r--r--vendor/google.golang.org/protobuf/proto/encode_gen.go97
-rw-r--r--vendor/google.golang.org/protobuf/proto/equal.go154
-rw-r--r--vendor/google.golang.org/protobuf/proto/extension.go92
-rw-r--r--vendor/google.golang.org/protobuf/proto/merge.go139
-rw-r--r--vendor/google.golang.org/protobuf/proto/messageset.go88
-rw-r--r--vendor/google.golang.org/protobuf/proto/proto.go34
-rw-r--r--vendor/google.golang.org/protobuf/proto/proto_methods.go19
-rw-r--r--vendor/google.golang.org/protobuf/proto/proto_reflect.go19
-rw-r--r--vendor/google.golang.org/protobuf/proto/reset.go43
-rw-r--r--vendor/google.golang.org/protobuf/proto/size.go94
-rw-r--r--vendor/google.golang.org/protobuf/proto/size_gen.go55
-rw-r--r--vendor/google.golang.org/protobuf/proto/wrappers.go29
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go77
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go478
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/source.go52
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/type.go631
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/value.go285
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go59
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go409
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go98
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go768
-rw-r--r--vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go15
-rw-r--r--vendor/google.golang.org/protobuf/runtime/protoiface/methods.go167
-rw-r--r--vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go44
-rw-r--r--vendor/google.golang.org/protobuf/runtime/protoimpl/version.go56
-rw-r--r--vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go287
-rw-r--r--vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go249
-rw-r--r--vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go271
-rw-r--r--vendor/gopkg.in/yaml.v2/apic.go1
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/json/json.go25
-rw-r--r--vendor/modules.txt87
-rw-r--r--version/version.go4
647 files changed, 42316 insertions, 15884 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index e53788c6c..c21bc74db 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -36,10 +36,10 @@ env:
###
FEDORA_NAME: "fedora-32"
PRIOR_FEDORA_NAME: "fedora-31"
- UBUNTU_NAME: "ubuntu-19"
- PRIOR_UBUNTU_NAME: "ubuntu-18"
+ UBUNTU_NAME: "ubuntu-20"
+ PRIOR_UBUNTU_NAME: "ubuntu-19"
- _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script'
+ _BUILT_IMAGE_SUFFIX: "libpod-6268069335007232" # From the packer output of 'build_vm_images_script'
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
@@ -520,6 +520,48 @@ integration_test_temporary_task:
path: "*.log.html"
type: "text/html"
+# This task only temporary as we creep up on making
+# all tests passing for v2 remote. Once all tests pass, we
+# should immediately remove this and re-enable the
+# testing matrix.
+remote_integration_test_temporary_task:
+
+ depends_on:
+ - "gating"
+ - "varlink_api"
+ - "vendor"
+ - "build_each_commit"
+ - "build_without_cgo"
+
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
+
+ env:
+ ADD_SECOND_PARTITION: 'true'
+ TEST_REMOTE_CLIENT: 'true'
+
+ timeout_in: 60m
+
+ networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh'
+ setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
+ integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} integration_test'
+
+ on_failure:
+ failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
+
+ always:
+ package_versions_script: '$SCRIPT_BASE/logcollector.sh packages'
+ ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
+ df_script: '$SCRIPT_BASE/logcollector.sh df'
+ audit_log_script: '$SCRIPT_BASE/logcollector.sh audit'
+ journal_script: '$SCRIPT_BASE/logcollector.sh journal'
+ varlink_script: '$SCRIPT_BASE/logcollector.sh varlink'
+ podman_system_info_script: '$SCRIPT_BASE/logcollector.sh podman'
+ html_artifacts:
+ path: "*.log.html"
+ type: "text/html"
+
# This task executes tests under unique environments/conditions
special_testing_rootless_task:
@@ -604,7 +646,6 @@ special_testing_in_podman_task:
special_testing_cross_task:
- skip: $CI == 'true'
alias: "special_testing_cross"
depends_on:
- "gating"
@@ -823,6 +864,7 @@ success_task:
- "static_build"
# FIXME remove when all v2 tests pass
- "integration_test_temporary"
+ - "remote_integration_test_temporary"
env:
CIRRUS_WORKING_DIR: "/usr/src/libpod"
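
The remote_integration_test_temporary task added above reuses the existing integration scripts and only flips TEST_REMOTE_CLIENT=true. As a rough local approximation of what it ends up exercising (an assumption pieced together from the Makefile targets elsewhere in this diff, not the literal CI commands, which live in contrib/cirrus/integration_test.sh):

# Sketch only: build both binaries, then run the e2e suite against the
# remote client, mirroring what the new CI task enables.
export TEST_REMOTE_CLIENT=true
make podman podman-remote
make ginkgo-remote
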
diff --git a/Dockerfile.ubuntu b/Dockerfile.ubuntu
index 3a8f837b9..160c1469c 100644
--- a/Dockerfile.ubuntu
+++ b/Dockerfile.ubuntu
@@ -1,5 +1,5 @@
# Must resemble $UBUNTU_BASE_IMAGE in ./contrib/cirrus/lib.sh
-FROM ubuntu:latest
+FROM ubuntu:20.04
# This container image is intended for building and testing libpod
# from inside a container environment. It is assumed that the source
diff --git a/Makefile b/Makefile
index e991d4b35..16a09fb59 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,7 @@ ETCDIR ?= /etc
TMPFILESDIR ?= ${PREFIX}/lib/tmpfiles.d
SYSTEMDDIR ?= ${PREFIX}/lib/systemd/system
USERSYSTEMDDIR ?= ${PREFIX}/lib/systemd/user
+REMOTETAGS := !ABISupport remoteclient exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp
BUILDTAGS ?= \
$(shell hack/apparmor_tag.sh) \
$(shell hack/btrfs_installed_tag.sh) \
@@ -189,11 +190,11 @@ podman: bin/podman
.PHONY: bin/podman-remote
bin/podman-remote: .gopathok $(SOURCES) go.mod go.sum $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote environment
- $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "!ABISupport remoteclient" -o $@ $(PROJECT)/cmd/podman
+ $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o $@ $(PROJECT)/cmd/podman
.PHONY: bin/podman-remote-static
podman-remote-static: bin/podman-remote-static
- CGO_ENABLED=0 $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN_STATIC)' -tags "!ABISupport containers_image_openpgp remoteclient" -o bin/podman-remote-static $(PROJECT)/cmd/podman
+ CGO_ENABLED=0 $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN_STATIC)' -tags "${REMOTETAGS}" -o bin/podman-remote-static $(PROJECT)/cmd/podman
.PHONY: podman-remote
podman-remote: bin/podman-remote
@@ -207,7 +208,7 @@ podman.msi: podman-remote podman-remote-windows install-podman-remote-windows-do
podman-remote-%: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build podman for a specific GOOS
$(eval BINSFX := $(shell test "$*" != "windows" || echo ".exe"))
- CGO_ENABLED=0 GOOS=$* $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@$(BINSFX) $(PROJECT)/cmd/podman
+ CGO_ENABLED=0 GOOS=$* $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o bin/$@$(BINSFX) $(PROJECT)/cmd/podman
local-cross: $(CROSS_BUILD_TARGETS) ## Cross local compilation
@@ -282,11 +283,11 @@ dbuild: libpodimage
.PHONY: dbuild-podman-remote
dbuild-podman-remote: libpodimage
- ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} go build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/podman-remote $(PROJECT)/cmd/podman
+ ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} go build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(REMOTETAGS)" -o bin/podman-remote $(PROJECT)/cmd/podman
.PHONY: dbuild-podman-remote-darwin
dbuild-podman-remote-darwin: libpodimage
- ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} env GOOS=darwin go build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/podman-remote-darwin $(PROJECT)/cmd/podman
+ ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} env GOOS=darwin go build -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o bin/podman-remote-darwin $(PROJECT)/cmd/podman
.PHONY: test
test: libpodimage ## Run tests on built image
@@ -329,7 +330,7 @@ ginkgo:
.PHONY: ginkgo-remote
ginkgo-remote:
- ginkgo -v $(TESTFLAGS) -tags "$(BUILDTAGS) remoteclient" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/.
+ ginkgo -v $(TESTFLAGS) -tags "$(REMOTETAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/.
.PHONY: endpoint
endpoint:
@@ -356,14 +357,14 @@ remotesystem:
# varlink server spews copious unhelpful output; ignore it.
rc=0;\
if timeout -v 1 true; then \
- SOCK_FILE=$(shell mktemp --dry-run --tmpdir io.podman.XXXXXX);\
- export PODMAN_VARLINK_ADDRESS=unix:$$SOCK_FILE; \
- ./bin/podman varlink --timeout=0 $$PODMAN_VARLINK_ADDRESS &> $(if $(VARLINK_LOG),$(VARLINK_LOG),/dev/null) & \
+ SOCK_FILE=$(shell mktemp --dry-run --tmpdir podman.XXXXXX);\
+ export PODMAN_SOCKEY=unix:$$SOCK_FILE; \
+ ./bin/podman system service --timeout=0 $$PODMAN_VARLINK_ADDRESS &> $(if $(VARLINK_LOG),$(VARLINK_LOG),/dev/null) & \
retry=5;\
while [[ $$retry -ge 0 ]]; do\
- echo Waiting for varlink server...;\
+ echo Waiting for server...;\
sleep 1;\
- ./bin/podman-remote info &>/dev/null && break;\
+ ./bin/podman-remote --remote $(SOCK_FILE) info &>/dev/null && break;\
retry=$$(expr $$retry - 1);\
done;\
env PODMAN=./bin/podman-remote bats test/system/ ;\
@@ -527,11 +528,6 @@ install.man-nobuild:
.PHONY: install.man
install.man: docs install.man-nobuild
-.PHONY: install.config
-install.config:
- install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(SHAREDIR_CONTAINERS)
- install ${SELINUXOPT} -m 644 libpod.conf $(DESTDIR)$(SHAREDIR_CONTAINERS)/libpod.conf
-
.PHONY: install.seccomp
install.seccomp:
# TODO: we should really be using the upstream one from github.com/seccomp
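
Two threads run through the Makefile changes above: the remote-client build tags are collected once in REMOTETAGS, and the remotesystem target now starts `podman system service` instead of the varlink server before running the system tests. A minimal manual sketch of that flow, assuming `--url` is how podman-remote is pointed at the socket (the remotesystem recipe above is the authoritative version):

# Build the local and remote binaries (the remote one picks up REMOTETAGS).
make podman podman-remote

# Start the podman API service on a throwaway socket, detached and quiet.
SOCK_FILE=$(mktemp --dry-run --tmpdir podman.XXXXXX)
./bin/podman system service --timeout=0 "unix:$SOCK_FILE" &>/dev/null &

# Wait until the service answers before running the tests.
retry=5
while [ "$retry" -ge 0 ]; do
    ./bin/podman-remote --url "unix:$SOCK_FILE" info &>/dev/null && break
    sleep 1
    retry=$((retry - 1))
done

# Run the system tests with the remote client standing in for podman.
env PODMAN=./bin/podman-remote bats test/system/
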
diff --git a/README.md b/README.md
index 34b12b3a9..4a5fb2333 100644
--- a/README.md
+++ b/README.md
@@ -5,13 +5,13 @@
Libpod provides a library for applications looking to use the Container Pod concept,
popularized by Kubernetes. Libpod also contains the Pod Manager tool `(Podman)`. Podman manages pods, containers, container images, and container volumes.
-* [Latest Version: 1.9.1](https://github.com/containers/libpod/releases/latest)
+* [Latest Version: 1.9.2](https://github.com/containers/libpod/releases/latest)
+ * Latest Remote client for Windows
+ * Latest Remote client for MacOs
+ * Latest Static Remote client for Linux
+
* [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master)
* [GoDoc: ![GoDoc](https://godoc.org/github.com/containers/libpod/libpod?status.svg)](https://godoc.org/github.com/containers/libpod/libpod)
-* Automated continuous release downloads (including remote-client):
- * [Latest remote client for Windows](https://storage.googleapis.com/libpod-master-releases/podman-remote-latest-master-windows-amd64.msi)
- * [Latest remote client for MacOS](https://storage.googleapis.com/libpod-master-releases/podman-remote-latest-master-darwin-amd64.zip)
- * [Latest Snap package](https://snapcraft.io/podman)
## Overview and scope
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 6657529b9..65a0571d5 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,19 @@
# Release Notes
+## 1.9.2
+### Bugfixes
+- Fixed a bug where `podman save` would fail when the target image was specified by digest ([#5234](https://github.com/containers/libpod/issues/5234))
+- Fixed a bug where rootless containers with ports forwarded to them could panic and dump core due to a concurrency issue ([#6018](https://github.com/containers/libpod/issues/6018))
+- Fixed a bug where rootless Podman could race when opening the rootless user namespace, resulting in commands failing to run
+- Fixed a bug where HTTP proxy environment variables forwarded into the container by the `--http-proxy` flag could not be overridden by `--env` or `--env-file` ([#6017](https://github.com/containers/libpod/issues/6017))
+- Fixed a bug where rootless Podman was setting resource limits on cgroups v2 systems that were not using systemd-managed cgroups (and thus did not support resource limits), resulting in containers failing to start
+
+### Misc
+- Rootless containers will now automatically set their ulimits to the maximum allowed for the user running the container, to match the behavior of containers run as root
+- Packages managed by the core Podman team will no longer include a default `libpod.conf`, instead defaulting to `containers.conf`. The default libpod.conf will remain available in the Github repository until the release of Podman 2.0
+- The default Podman CNI network configuration now sets HairpinMode to allow containers to access other containers via ports published on the host
+- Updated containers/common to v0.8.4
+
## 1.9.1
### Bugfixes
- Fixed a bug where healthchecks could become nonfunctional if container log paths were manually set with `--log-path` and multiple container logs were placed in the same directory ([#5915](https://github.com/containers/libpod/issues/5915))
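
One of the 1.9.2 fixes above concerns proxy handling: variables forwarded from the host by --http-proxy can now be overridden per container with --env or --env-file. A small illustrative run, with placeholder proxy URLs and the alpine image standing in for any container:

# Host-side proxy setting that --http-proxy would normally forward.
export http_proxy=http://host-proxy.example.com:3128

# Forward the host proxy settings, but override http_proxy for this container;
# after the fix, the --env value wins.
podman run --rm --http-proxy \
    --env http_proxy=http://other-proxy.example.com:8080 \
    alpine env | grep -i proxy
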
diff --git a/changelog.txt b/changelog.txt
index 95f8d5b47..552a17663 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,346 @@
+- Changelog for v2.0.0-rc1 (2020-05-18)
+ * v2endpoint remove image path correction
+ * Drop APIv2 resize endpoint
+ * Drop a debug line which could print very large messages
+ * v2 podman remote attach, start, and run
+ * Fix lint
+ * Remove duplicated exec handling code
+ * Fix lint
+ * Update API documentation for Inspect
+ * Parameters for ExecStart are body, not query
+ * Prune stale exec sessions on inspect
+ * Remove exec sessions on container restart
+ * Fix start order for APIv2 exec start endpoint
+ * Don't fail when saving exec status fails on removed ctr
+ * Add APIv2 handler for resizing exec sessions
+ * Ensure that Streams are set to defaults for HTTP attach
+ * Wire in endpoint for ExecStart
+ * Add an initial implementation of HTTP-forwarded exec
+ * Make convenience boxed true/false easier to use
+ * Use the libpod.conf cni_config_dir option for inspect and delete
+ * Cirrus: Refresh VM Images, Add Ubuntu 20 LTS
+ * Cirrus: Fix image-name hints
+ * Cirrus: Update Ubuntu 18 to 20
+ * fix bug --format {{json.}} of events
+ * V2 Update attach bindings to use Readers/Writers vs chan
+ * Ensure that cleanup runs before we set Removing state
+ * Fix two coverity issues (unchecked null return)
+ * Fix REMOTETAGS
+ * Cleanup OCI runtime before storage
+ * Default podman.spec to use crun
+ * Fix checkpoint --leave-running
+ * Bump github.com/containers/storage from 1.19.1 to 1.19.2
+ * Bump github.com/containernetworking/plugins from 0.8.5 to 0.8.6
+ * Update release notes and version on master
+ * WIP V2 attach bindings and test
+ * [CI:DOCS]remove libpod.conf from spec
+ * enable remote image tree
+ * Bump github.com/containers/conmon
+ * Bump gopkg.in/yaml.v2 from 2.2.8 to 2.3.0
+ * system tests: add volume tests
+ * cgroup: skip unified if we are using v1
+ * enable podman v2 networking for remote client
+ * Remove libpod.conf from repo
+ * add podman remote system df
+ * vendor crio/ocicni@v0.2.0
+ * test: enable networking test for rootless
+ * rootless: do not set pids limits with cgroupfs
+ * auto-update: support authfiles
+ * Add netgo build tag to static binary
+ * Adds tunnel routes for system reset.
+ * add port to podman remote command
+ * Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4
+ * Bump github.com/containers/common from 0.11.1 to 0.11.2
+ * Some BATS cleanup: run and systemd tests
+ * v2podman image sign
+ * shm_lock_test: add nil check
+ * Add podman static build
+ * enable rootless mount tests
+ * spec: fix order for setting rlimits
+ * enable rootless integration testing
+ * [CI:DOCS] Add Security Policy
+ * V2 Impliment tunnelled podman version
+ * Ensure `podman inspect` output for NetworkMode is right
+ * Fix bug where pods would unintentionally share cgroupns
+ * bindings tests for container remove and inspect
+ * Add remaining annotations for `podman inspect`
+ * v2 podman unshare command
+ * Update the Podman readme
+ * v2 podman search rootless
+ * Fix `podman pod create --infra=false`
+ * default to tunnel without ABISupport tag
+ * abi: do not attempt to setup rootless if euid==0
+ * fix pod stats flake
+ * set binding tests to required
+ * Fix handling of overridden paths from database
+ * Fix typo in path
+ * Makefile: fix a dependency issue
+ * Fixed typo on podman network create man
+ * fix and enable systemd system tests
+ * Bump github.com/onsi/gomega from 1.9.0 to 1.10.0
+ * auto-update
+ * set --conmon-pidfile
+ * Fix parsing of --network for `podman pod create`
+ * Add podman-remote-static target
+ * podman: split env variables in env and overrides
+ * v2trust set and show
+ * container runlabel
+ * enable login/logut unspecified args
+ * [CI:DOCS] Add link to Tutorials to docs homepage
+ * Enables port test
+ * CI:DOCS: Document API docs + CORS maintenance
+ * Update manpages for image volumes and MAC address
+ * Updated heading from 5 to 6 in link.
+ * add {generate,play} kube
+ * Manifest remove, push
+ * Reenable systemd E2E tests
+ * Revert commit 016a91 already accepted.
+ * Updated heading from 5 to 6 in the link.
+ * Add small fixes for 'podman run' from diffing inspect
+ * manifest annotate
+ * Bump k8s.io/api from 0.17.4 to 0.18.2
+ * Bump github.com/containers/storage from 1.19.0 to 1.19.1
+ * Eliminate race condition on podman info
+ * v2 system subcommand
+ * v2 podman stats
+ * BATS help test: check usage string
+ * Rework port parsing to support --expose and -P
+ * [CI:DOC] Add linger to troubleshooting
+ * Fix errors found when comparing podman v1 --help versus V2
+ * Updated the broken links for the docs.
+ * Updated the broken links for the docs.
+ * image removal: refactor part 2
+ * build(deps): bump github.com/uber/jaeger-client-go
+ * Bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0
+ * [CI:DOC]Use full repo name in podmanimage Dockerfiles
+ * Fix errors found in coverity scan
+ * Remove skip on containers.conf tests
+ * cgroupsns was not following containers.conf
+ * Properly handle default capabilities listed in containers.conf
+ * Properly handle containers.conf devices
+ * [CI:DOCS] Bring README.md up to date
+ * And system prune feature for v2.
+ * Fix errors found in coverity scan
+ * check --get-login when login
+ * search --limit compatible with docker
+ * add provided cni networks to spec gen
+ * fix commands without input
+ * System tests: help messages: check required-arg
+ * v2networking enable commands
+ * V2 Commands that require ParentNS (rootful) are report error
+ * Cirrus: Utilize new cache images
+ * Cirrus: Utilize new base images
+ * cirrus: Update to Fedora 32 proper
+ * Enable prune integration test. Fixes container prune.
+ * test: enable start tests
+ * podman, start: propagate back the raw input
+ * test: enable remaining run tests
+ * test: enable entrypoint tests
+ * test: enable create tests
+ * cmd, podman: do not override entrypoint if unset
+ * cmd, podman: use String instead of variable+StringVar
+ * cmd, podman: handle --pod new:POD
+ * create: propagate override-arch and override-os
+ * testv2: enable attach test
+ * V2 enable ps tests
+ * enable final system test
+ * V2 restore podman -v command
+ * V2 Restore images list tests
+ * enable search tests
+ * pull/search options: tls verify -> skip
+ * test: enable cp tests
+ * login system test: enable "push ok"
+ * enable the push e2e tests
+ * push: fix --tls-verify
+ * push: simplify cmd
+ * rootlessport: use two different channels
+ * specgen: honor slirp4netns
+ * rootless: move ns open before fork
+ * push: fix push with one argument
+ * enable inspect tests
+ * generate systemd
+ * Update release notes and README for 1.9.1 release
+ * Update podmanimage files to adjust perms on containers.conf for rootless
+ * User specified environment happen after other environments are set
+ * system tests must pass
+ * Fix podman save failure when specifying an image using a digest #5234
+ * Fix typos in rm messages
+ * check image media/manifest type for healthchecks
+ * test: enable exec tests
+ * pkg, specgen: do not hardcode user=0 in the config if not specified
+ * specgen: remove dead code
+ * cmd: set correct parent for container exec
+ * Set up ulimits for rootless containers.
+ * enable build tests
+ * enable volume integration tests
+ * separate healthcheck and container log paths
+ * install.md: Fix typo
+ * Improve Entrypoint and Command support
+ * Add support for volumes-from, image volumes, init
+ * Fix NewSpecGenerator args in pkg/bindings/test
+ * enable load integration tests
+ * test: enable all pod tests
+ * libpod: set hostname from joined container
+ * namespaces: accept pod namespace
+ * pkg, ps: add namespaces methods
+ * enable integration tests for restart
+ * Make podman container list == podman ps
+ * test: enable pod rm tests
+ * pkg, pods: report pod rm errors
+ * pkg, pods: pod rm honors --ignore
+ * test: enable pod restart tests
+ * pkg, pods: do not lose pod start/restart errors
+ * test: enable pod stop tests
+ * pkg, pods: honor --ignore for pod stop
+ * test: enable pod create tests
+ * specgen: relax test to accept default network
+ * spec, pod: honor --dns
+ * spec: propagate --no-hosts to specgen
+ * sort .gitignore
+ * .gitignore: add pkg/api/swagger.yaml
+ * build(deps): bump github.com/rootless-containers/rootlesskit
+ * implement pod stats
+ * test: fix check for pause on cgroup v2
+ * test: fix pause tests
+ * cmd, ps: add .Status as synonym for .State
+ * test: enable healthcheck tests
+ * podman: handle --no-healthcheck
+ * specgen: read healthchecks from the image
+ * podman: special case health-cmd none
+ * Enable pod inspect integration test
+ * Enable pod prune integration test
+ * enable run_restart integration tests
+ * enable run_ns integration tests
+ * enable run_signal integration tests
+ * Enable these tests
+ * Enable container inspect integration tests
+ * Enable pod ps integration tests
+ * Cleanup man pages for pull and push
+ * Adding system prune for podman v2
+ * V2 tests: enables commit tests
+ * Add --os to manifest add
+ * containers, init: skip invalid state errors with --all
+ * podman: assume user namespace if there are mappings
+ * Do not join pod namespaces without an infra ctr
+ * podman: implement userns=keep-id
+ * Cirrus: Utilize new VM images
+ * Cirrus: Unify package installation
+ * test: enable cgroup parent tests
+ * podman: fix --log-opt=path=%s
+ * podman: fix --http-proxy
+ * podman: fix podman --group-add
+ * test: fix --host-env test
+ * podman: fix --cgroups=disabled
+ * test: enable some run_test.go tests that pass now
+ * podman: add support for --rootfs
+ * Bump github.com/containers/common from 0.9.4 to 0.9.5
+ * specgen: fix error message
+ * create: move validate after setting default ns
+ * remove blank line
+ * set bigfilestemporarydir for pull
+ * Fix SELinux functions names to not be repetitive
+ * foo: delete spurious file
+ * Makefile: include -nobuild install targets
+ * podman: handle namespaces specified on the CLI
+ * specgen: do not always set shmsize
+ * pkg: fix shmsize error message
+ * Stop wrapping pull messages
+ * manifest create,add,inspect
+ * V2 Restore rmi tests
+ * V2 restore libpod.Shutdown() when exiting podman commands
+ * Turn on version.go except for -v check
+ * Fix podman push and podman pull to check for authfile
+ * Enable basic volumes support in Podmanv2
+ * Move selinux labeling support from pkg/util to pkg/selinux
+ * Fix integration tests for untag
+ * Instrumentation to answer #5765
+ * test rootless_storage_path from storage.conf
+ * V2 Restore exists E2E tests
+ * Fix podman rm to have correct exit codes
+ * Fix v2 test podman info
+ * Fix handling of --cidfile on create/run
+ * vendor in containers/common v0.9.4
+ * Handle hostname flag from client
+ * Add support for devices from command line
+ * Fix handling of CGroupsParent and CGroupsMode
+ * Throw error on IPv6 ip addresses
+ * Force integration tests to pass
+ * Modify namespace generation code for specgen
+ * Bump to github.com/containers/common to v0.9.2
+ * my bad
+ * Provide a json variable pointing to a configured json API
+ * podmanv2 cp
+ * gate/README.md Fix link to .cirrus.yml and reword
+ * add entrypoint from image where needed
+ * Makefile: fix broken chcon for podman-remote
+ * podmanv2 container subcommands
+ * v2podman port
+ * v2: implement log{in,out}
+ * Move Fedora dependencies for building podman into separate file
+ * v2, podman: plug --userns=auto
+ * podman: do not set empty cgroup limit blocks
+ * Handle annotations passed in via the client
+ * Need to set the Entrypoint
+ * Fix podman inspect to return errors on failure
+ * pkg: implement rlimits
+ * podman rmi: refactor logic
+ * Add support for containers.conf to podmanimages
+ * Update podman to use containers.conf
+ * Fix podman inspect to accept -l and -s fields
+ * Handle Linux Capabilities correctly
+ * Add functions to return image informations
+ * V2 Remove existing unix domain socket on startup
+ * Cirrus: Add support for Fedora 32
+ * Cirrus: More Ubuntu 19 + Fedora 31
+ * V2 podman image tree
+ * V2 Fix --latest for podman diff commands
+ * rootless: move join namespace inside child process
+ * rootless: skip looking up parent user ns
+ * common: setting cgroup resources correctly
+ * Update pod inspect report to hold current pod status.
+ * Pull images when doing podman create
+ * Return labels in API (fixes #5882)
+ * Make `find` ignore dot files
+ * Cleanup network option parsing
+ * enable integration testing
+ * V2 Fix support for tcp://[::]<port> connections
+ * Add pod prune for api v2.
+ * We were not handling the user option on create
+ * Fixes for system tests
+ * Enable some testing
+ * Log formatter: add BATS summary line
+ * Bump github.com/containers/psgo from 1.4.0 to 1.5.0
+ * podmanV2: implement build
+ * Fix bug where two configurations had been created
+ * Podman V2 birth
+ * V2 Enable rootless
+ * Add SELinux volume information to troubleshoot.md
+ * podman v2 remove bloat v2
+ * allow filters to work when listing containers
+ * Update podman-generate-systemd man page
+ * .gitignore: ignore v2 remote
+ * Bump github.com/containers/common from 0.9.0 to 0.9.1
+ * Add version to podman info command
+ * Add basic structure of output for APIv2 pod inspect
+ * v2 bloat pruning phase 2
+ * Add support for selecting kvm and systemd labels
+ * Fix up SELinux labeling
+ * podmanv2 fix runtime assignment
+ * Cirrus: Fix gate container build failure
+ * logformat: handle apiv2 results, add anchor links
+ * Update README to reflect that latest version is v1.9.0
+ * Ability to prune container in api V2
+ * Bump to v2.0.0-dev
+ * podmanv2 events
+ * test case added for image prune cache image
+ * note for skipping cache image added.
+ * image prune skips images with child images.
+ * swagger-check: new CI tool to cross-check swagger
+ * auto update: skip non-image policies
+ * build(deps): bump github.com/containers/common from 0.8.1 to 0.9.0
+ * logformat: handle apiv2 results, add anchor links
+ * If possible use the pod name when creating a network
+
- Changelog for v1.9.0 (2020-04-15)
* podmanV2: fix nil deref
* v2specgen prune libpod
diff --git a/cmd/podman/auto-update.go b/cmd/podman/auto-update.go
index 758cbbc6f..11433bc25 100644
--- a/cmd/podman/auto-update.go
+++ b/cmd/podman/auto-update.go
@@ -3,6 +3,7 @@ package main
import (
"fmt"
+ "github.com/containers/common/pkg/auth"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/errorhandling"
@@ -11,16 +12,18 @@ import (
)
var (
+ autoUpdateOptions = entities.AutoUpdateOptions{}
autoUpdateDescription = `Auto update containers according to their auto-update policy.
Auto-update policies are specified with the "io.containers.autoupdate" label.
- Note that this command is experimental.`
+ Note that this command is experimental. Please refer to the podman-auto-update(1) man page for details.`
autoUpdateCommand = &cobra.Command{
- Use: "auto-update [flags]",
- Short: "Auto update containers according to their auto-update policy",
- Long: autoUpdateDescription,
- RunE: autoUpdate,
- Example: `podman auto-update`,
+ Use: "auto-update [flags]",
+ Short: "Auto update containers according to their auto-update policy",
+ Long: autoUpdateDescription,
+ RunE: autoUpdate,
+ Example: `podman auto-update
+ podman auto-update --authfile ~/authfile.json`,
}
)
@@ -29,6 +32,9 @@ func init() {
Mode: []entities.EngineMode{entities.ABIMode},
Command: autoUpdateCommand,
})
+
+ flags := autoUpdateCommand.Flags()
+ flags.StringVar(&autoUpdateOptions.Authfile, "authfile", auth.GetDefaultAuthFile(), "Path to the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
}
func autoUpdate(cmd *cobra.Command, args []string) error {
@@ -36,7 +42,7 @@ func autoUpdate(cmd *cobra.Command, args []string) error {
// Backwards compat. System tests expect this error string.
return errors.Errorf("`%s` takes no arguments", cmd.CommandPath())
}
- report, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext())
+ report, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext(), autoUpdateOptions)
if report != nil {
for _, unit := range report.Units {
fmt.Println(unit)
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 664e66df8..1fabff378 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -8,12 +8,14 @@ import (
"strings"
"time"
+ "github.com/containers/common/pkg/config"
"github.com/containers/image/v5/manifest"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/libpod/define"
ann "github.com/containers/libpod/pkg/annotations"
envLib "github.com/containers/libpod/pkg/env"
ns "github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/specgen"
systemdGen "github.com/containers/libpod/pkg/systemd/generate"
"github.com/containers/libpod/pkg/util"
@@ -126,20 +128,23 @@ func getIOLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (
return io, nil
}
-func getPidsLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (*specs.LinuxPids, error) {
+func getPidsLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) *specs.LinuxPids {
pids := &specs.LinuxPids{}
- hasLimits := false
- if c.CGroupsMode == "disabled" && c.PIDsLimit > 0 {
- return nil, nil
+ if c.CGroupsMode == "disabled" && c.PIDsLimit != 0 {
+ return nil
+ }
+ if c.PIDsLimit < 0 {
+ if rootless.IsRootless() && containerConfig.Engine.CgroupManager != config.SystemdCgroupsManager {
+ return nil
+ }
+ pids.Limit = containerConfig.PidsLimit()
+ return pids
}
if c.PIDsLimit > 0 {
pids.Limit = c.PIDsLimit
- hasLimits = true
+ return pids
}
- if !hasLimits {
- return nil, nil
- }
- return pids, nil
+ return nil
}
func getMemoryLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (*specs.LinuxMemory, error) {
@@ -464,10 +469,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
if err != nil {
return err
}
- s.ResourceLimits.Pids, err = getPidsLimits(s, c, args)
- if err != nil {
- return err
- }
+ s.ResourceLimits.Pids = getPidsLimits(s, c, args)
s.ResourceLimits.CPU, err = getCPULimits(s, c, args)
if err != nil {
return err
diff --git a/cmd/podman/containers/attach.go b/cmd/podman/containers/attach.go
index 119b47d3f..9f29d1664 100644
--- a/cmd/podman/containers/attach.go
+++ b/cmd/podman/containers/attach.go
@@ -52,14 +52,14 @@ func attachFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: attachCommand,
})
flags := attachCommand.Flags()
attachFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerAttachCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/checkpoint.go b/cmd/podman/containers/checkpoint.go
index 7259ed38b..c4723af21 100644
--- a/cmd/podman/containers/checkpoint.go
+++ b/cmd/podman/containers/checkpoint.go
@@ -45,7 +45,7 @@ func init() {
})
flags := checkpointCommand.Flags()
flags.BoolVarP(&checkpointOptions.Keep, "keep", "k", false, "Keep all temporary checkpoint files")
- flags.BoolVarP(&checkpointOptions.LeaveRuninng, "leave-running", "R", false, "Leave the container running after writing checkpoint to disk")
+ flags.BoolVarP(&checkpointOptions.LeaveRunning, "leave-running", "R", false, "Leave the container running after writing checkpoint to disk")
flags.BoolVar(&checkpointOptions.TCPEstablished, "tcp-established", false, "Checkpoint a container with established TCP connections")
flags.BoolVarP(&checkpointOptions.All, "all", "a", false, "Checkpoint all running containers")
flags.BoolVarP(&checkpointOptions.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/containers/cleanup.go b/cmd/podman/containers/cleanup.go
index 2bcd1c1e9..619031208 100644
--- a/cmd/podman/containers/cleanup.go
+++ b/cmd/podman/containers/cleanup.go
@@ -7,6 +7,8 @@ import (
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -43,6 +45,7 @@ func init() {
flags := cleanupCommand.Flags()
flags.BoolVarP(&cleanupOptions.All, "all", "a", false, "Cleans up all containers")
flags.BoolVarP(&cleanupOptions.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.StringVar(&cleanupOptions.Exec, "exec", "", "Clean up the given exec session instead of the container")
flags.BoolVar(&cleanupOptions.Remove, "rm", false, "After cleanup, remove the container entirely")
flags.BoolVar(&cleanupOptions.RemoveImage, "rmi", false, "After cleanup, remove the image entirely")
@@ -52,8 +55,26 @@ func cleanup(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
+
+ if cleanupOptions.Exec != "" {
+ switch {
+ case cleanupOptions.All:
+ return errors.Errorf("exec and all options conflict")
+ case len(args) > 1:
+ return errors.Errorf("cannot use exec option when more than one container is given")
+ case cleanupOptions.RemoveImage:
+ return errors.Errorf("exec and rmi options conflict")
+ }
+ }
+
responses, err := registry.ContainerEngine().ContainerCleanup(registry.GetContext(), args, cleanupOptions)
if err != nil {
+ // `podman container cleanup` is almost always run in the
+ // background. Our only way of relaying information to the user
+ // is via syslog.
+ // As such, we need to logrus.Errorf our errors to ensure they
+ // are properly printed if --syslog is set.
+ logrus.Errorf("Error running container cleanup: %v", err)
return err
}
for _, r := range responses {
@@ -62,12 +83,15 @@ func cleanup(cmd *cobra.Command, args []string) error {
continue
}
if r.RmErr != nil {
+ logrus.Errorf("Error removing container: %v", r.RmErr)
errs = append(errs, r.RmErr)
}
if r.RmiErr != nil {
+ logrus.Errorf("Error removing image: %v", r.RmiErr)
errs = append(errs, r.RmiErr)
}
if r.CleanErr != nil {
+ logrus.Errorf("Error cleaning up container: %v", r.CleanErr)
errs = append(errs, r.CleanErr)
}
}
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index 2ecdda2e0..bb6cb5fdd 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -6,6 +6,8 @@ import (
"os"
"strings"
+ "github.com/containers/libpod/libpod/define"
+
"github.com/containers/common/pkg/config"
"github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/registry"
@@ -168,6 +170,9 @@ func createInit(c *cobra.Command) error {
if c.Flag("pid").Changed {
cliVals.PID = c.Flag("pid").Value.String()
}
+ if !c.Flag("pids-limit").Changed {
+ cliVals.PIDsLimit = -1
+ }
if c.Flag("cgroupns").Changed {
cliVals.CGroupsNS = c.Flag("cgroupns").Value.String()
}
@@ -200,7 +205,7 @@ func pullImage(imageName string) error {
}
if !br.Value || pullPolicy == config.PullImageAlways {
if pullPolicy == config.PullImageNever {
- return errors.New("unable to find a name and tag match for busybox in repotags: no such image")
+ return errors.Wrapf(define.ErrNoSuchImage, "unable to find a name and tag match for %s in repotags", imageName)
}
_, pullErr := registry.ImageEngine().Pull(registry.GetContext(), imageName, entities.ImagePullOptions{
Authfile: cliVals.Authfile,
diff --git a/cmd/podman/containers/exec.go b/cmd/podman/containers/exec.go
index 0992b3862..7554d6a93 100644
--- a/cmd/podman/containers/exec.go
+++ b/cmd/podman/containers/exec.go
@@ -2,9 +2,11 @@ package containers
import (
"bufio"
+ "fmt"
"os"
"github.com/containers/libpod/cmd/podman/registry"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/domain/entities"
envLib "github.com/containers/libpod/pkg/env"
"github.com/pkg/errors"
@@ -41,10 +43,12 @@ var (
var (
envInput, envFile []string
execOpts entities.ExecOptions
+ execDetach bool
)
func execFlags(flags *pflag.FlagSet) {
flags.SetInterspersed(false)
+ flags.BoolVarP(&execDetach, "detach", "d", false, "Run the exec session in detached mode (backgrounded)")
flags.StringVar(&execOpts.DetachKeys, "detach-keys", containerConfig.DetachKeys(), "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
flags.StringArrayVarP(&envInput, "env", "e", []string{}, "Set environment variables")
flags.StringSliceVar(&envFile, "env-file", []string{}, "Read in a file of environment variables")
@@ -106,16 +110,27 @@ func exec(cmd *cobra.Command, args []string) error {
}
execOpts.Envs = envLib.Join(execOpts.Envs, cliEnv)
- execOpts.Streams.OutputStream = os.Stdout
- execOpts.Streams.ErrorStream = os.Stderr
- if execOpts.Interactive {
- execOpts.Streams.InputStream = bufio.NewReader(os.Stdin)
- execOpts.Streams.AttachInput = true
+
+ if !execDetach {
+ streams := define.AttachStreams{}
+ streams.OutputStream = os.Stdout
+ streams.ErrorStream = os.Stderr
+ if execOpts.Interactive {
+ streams.InputStream = bufio.NewReader(os.Stdin)
+ streams.AttachInput = true
+ }
+ streams.AttachOutput = true
+ streams.AttachError = true
+
+ exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrId, execOpts, streams)
+ registry.SetExitCode(exitCode)
+ return err
}
- execOpts.Streams.AttachOutput = true
- execOpts.Streams.AttachError = true
- exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrId, execOpts)
- registry.SetExitCode(exitCode)
- return err
+ id, err := registry.ContainerEngine().ContainerExecDetached(registry.GetContext(), nameOrId, execOpts)
+ if err != nil {
+ return err
+ }
+ fmt.Println(id)
+ return nil
}
diff --git a/cmd/podman/containers/port.go b/cmd/podman/containers/port.go
index ec0ddf838..d058a6aaf 100644
--- a/cmd/podman/containers/port.go
+++ b/cmd/podman/containers/port.go
@@ -57,7 +57,7 @@ func portFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: portCommand,
})
@@ -65,7 +65,7 @@ func init() {
portFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerPortCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index 2a0f9cc6a..f01462447 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -3,6 +3,7 @@ package containers
import (
"context"
"fmt"
+ "strings"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
@@ -120,10 +121,14 @@ func rm(cmd *cobra.Command, args []string) error {
func setExitCode(err error) {
cause := errors.Cause(err)
- switch cause {
- case define.ErrNoSuchCtr:
+ switch {
+ case cause == define.ErrNoSuchCtr:
registry.SetExitCode(1)
- case define.ErrCtrStateInvalid:
+ case strings.Contains(cause.Error(), define.ErrNoSuchImage.Error()):
+ registry.SetExitCode(1)
+ case cause == define.ErrCtrStateInvalid:
+ registry.SetExitCode(2)
+ case strings.Contains(cause.Error(), define.ErrCtrStateInvalid.Error()):
registry.SetExitCode(2)
}
}
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 5f3ea9ef4..2298691a9 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -66,14 +66,14 @@ func runFlags(flags *pflag.FlagSet) {
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: runCommand,
})
flags := runCommand.Flags()
runFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerRunCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go
index ce78d24ed..751fec65f 100644
--- a/cmd/podman/containers/start.go
+++ b/cmd/podman/containers/start.go
@@ -53,14 +53,14 @@ func startFlags(flags *pflag.FlagSet) {
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: startCommand,
})
flags := startCommand.Flags()
startFlags(flags)
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerStartCommand,
Parent: containerCmd,
})
diff --git a/cmd/podman/containers/top.go b/cmd/podman/containers/top.go
index 732a08623..d2b11ec77 100644
--- a/cmd/podman/containers/top.go
+++ b/cmd/podman/containers/top.go
@@ -9,20 +9,18 @@ import (
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
- "github.com/containers/psgo"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
- topDescription = fmt.Sprintf(`Similar to system "top" command.
+ topDescription = `Similar to system "top" command.
Specify format descriptors to alter the output.
- Running "podman top -l pid pcpu seccomp" will print the process ID, the CPU percentage and the seccomp mode of each process of the latest container.
- Format Descriptors:
- %s`, strings.Join(psgo.ListDescriptors(), ","))
+ Running "podman top -l pid pcpu seccomp" will print the process ID, the CPU percentage and the seccomp mode of each process of the latest container.`
topOptions = entities.TopOptions{}
@@ -68,6 +66,12 @@ func init() {
flags := topCommand.Flags()
topFlags(flags)
+ descriptors, err := util.GetContainerPidInformationDescriptors()
+ if err == nil {
+ topDescription = fmt.Sprintf("%s\n\n Format Descriptors:\n %s", topDescription, strings.Join(descriptors, ","))
+ topCommand.Long = topDescription
+ }
+
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: containerTopCommand,
@@ -79,7 +83,11 @@ func init() {
func top(cmd *cobra.Command, args []string) error {
if topOptions.ListDescriptors {
- fmt.Println(strings.Join(psgo.ListDescriptors(), "\n"))
+ descriptors, err := util.GetContainerPidInformationDescriptors()
+ if err != nil {
+ return err
+ }
+ fmt.Println(strings.Join(descriptors, "\n"))
return nil
}
diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go
index 83c039ed3..022c90f71 100644
--- a/cmd/podman/images/list.go
+++ b/cmd/podman/images/list.go
@@ -85,7 +85,7 @@ func images(cmd *cobra.Command, args []string) error {
return errors.New("cannot specify an image and a filter(s)")
}
- if len(listOptions.Filter) < 1 && len(args) > 0 {
+ if len(args) > 0 {
listOptions.Filter = append(listOptions.Filter, "reference="+args[0])
}
@@ -152,10 +152,16 @@ func writeTemplate(imageS []*entities.ImageSummary) error {
)
imgs := make([]imageReporter, 0, len(imageS))
for _, e := range imageS {
- for _, tag := range e.RepoTags {
- var h imageReporter
+ var h imageReporter
+ if len(e.RepoTags) > 0 {
+ for _, tag := range e.RepoTags {
+ h.ImageSummary = *e
+ h.Repository, h.Tag = tokenRepoTag(tag)
+ imgs = append(imgs, h)
+ }
+ } else {
h.ImageSummary = *e
- h.Repository, h.Tag = tokenRepoTag(tag)
+ h.Repository = "<none>"
imgs = append(imgs, h)
}
listFlag.readOnly = e.IsReadOnly()
diff --git a/cmd/podman/images/prune.go b/cmd/podman/images/prune.go
index 7c9e3eb61..676382a99 100644
--- a/cmd/podman/images/prune.go
+++ b/cmd/podman/images/prune.go
@@ -61,12 +61,6 @@ Are you sure you want to continue? [y/N] `)
}
}
- // TODO Remove once filter refactor is finished and url.Values rules :)
- for _, f := range filter {
- t := strings.SplitN(f, "=", 2)
- pruneOpts.Filters.Add(t[0], t[1])
- }
-
results, err := registry.ImageEngine().Prune(registry.GetContext(), pruneOpts)
if err != nil {
return err
diff --git a/cmd/podman/images/sign.go b/cmd/podman/images/sign.go
new file mode 100644
index 000000000..bd9cf2ea7
--- /dev/null
+++ b/cmd/podman/images/sign.go
@@ -0,0 +1,55 @@
+package images
+
+import (
+ "os"
+
+ "github.com/containers/libpod/cmd/podman/registry"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+var (
+ signDescription = "Create a signature file that can be used later to verify the image."
+ signCommand = &cobra.Command{
+ Use: "sign [flags] IMAGE [IMAGE...]",
+ Short: "Sign an image",
+ Long: signDescription,
+ RunE: sign,
+ Args: cobra.MinimumNArgs(1),
+ Example: `podman image sign --sign-by mykey imageID
+ podman image sign --sign-by mykey --directory ./mykeydir imageID`,
+ }
+)
+
+var (
+ signOptions entities.SignOptions
+)
+
+func init() {
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Mode: []entities.EngineMode{entities.ABIMode},
+ Command: signCommand,
+ Parent: imageCmd,
+ })
+ flags := signCommand.Flags()
+ flags.StringVarP(&signOptions.Directory, "directory", "d", "", "Define an alternate directory to store signatures")
+ flags.StringVar(&signOptions.SignBy, "sign-by", "", "Name of the signing key")
+ flags.StringVar(&signOptions.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
+}
+
+func sign(cmd *cobra.Command, args []string) error {
+ if signOptions.SignBy == "" {
+ return errors.Errorf("please provide an identity")
+ }
+
+ var sigStoreDir string
+ if len(signOptions.Directory) > 0 {
+ sigStoreDir = signOptions.Directory
+ if _, err := os.Stat(sigStoreDir); err != nil {
+ return errors.Wrapf(err, "invalid directory %s", sigStoreDir)
+ }
+ }
+ _, err := registry.ImageEngine().Sign(registry.Context(), args, signOptions)
+ return err
+}
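A hedged usage sketch for the `podman image sign` command defined above; --sign-by is mandatory, and when --directory is given the directory must already exist. The key identity and paths are illustrative.

```
$ mkdir -p /tmp/sigstore
$ podman image sign --sign-by dev@example.com --directory /tmp/sigstore docker.io/library/alpine:latest
```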
diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index 2bb75ea9e..5d28c7140 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -5,7 +5,7 @@ import (
"net"
"github.com/containers/libpod/cmd/podman/registry"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/network"
"github.com/pkg/errors"
@@ -46,7 +46,7 @@ func networkCreateFlags(flags *pflag.FlagSet) {
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkCreateCommand,
Parent: networkCmd,
})
@@ -65,8 +65,8 @@ func networkCreate(cmd *cobra.Command, args []string) error {
if len(args) > 1 {
return errors.Errorf("only one network can be created at a time")
}
- if len(args) > 0 && !libpod.NameRegex.MatchString(args[0]) {
- return libpod.RegexError
+ if len(args) > 0 && !define.NameRegex.MatchString(args[0]) {
+ return define.RegexError
}
if len(args) > 0 {
diff --git a/cmd/podman/networks/inspect.go b/cmd/podman/networks/inspect.go
index 0bc73579a..1b2e89909 100644
--- a/cmd/podman/networks/inspect.go
+++ b/cmd/podman/networks/inspect.go
@@ -3,6 +3,10 @@ package network
import (
"encoding/json"
"fmt"
+ "html/template"
+ "io"
+ "os"
+ "strings"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
@@ -24,12 +28,18 @@ var (
}
)
+var (
+ networkInspectOptions entities.NetworkInspectOptions
+)
+
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkinspectCommand,
Parent: networkCmd,
})
+ flags := networkinspectCommand.Flags()
+ flags.StringVarP(&networkInspectOptions.Format, "format", "f", "", "Pretty-print network to JSON or using a Go template")
}
func networkInspect(cmd *cobra.Command, args []string) error {
@@ -41,6 +51,22 @@ func networkInspect(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- fmt.Println(string(b))
+ if strings.ToLower(networkInspectOptions.Format) == "json" || networkInspectOptions.Format == "" {
+ fmt.Println(string(b))
+ } else {
+ var w io.Writer = os.Stdout
+ //There can be more than 1 in the inspect output.
+ format := "{{range . }}" + networkInspectOptions.Format + "{{end}}"
+ tmpl, err := template.New("inspectNetworks").Parse(format)
+ if err != nil {
+ return err
+ }
+ if err := tmpl.Execute(w, responses); err != nil {
+ return err
+ }
+ if flusher, ok := w.(interface{ Flush() error }); ok {
+ return flusher.Flush()
+ }
+ }
return nil
}
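A small sketch of the new --format handling above: an empty or "json" format prints the raw JSON, and any other value is wrapped in a {{range .}}...{{end}} template over the inspected networks. The field name below depends on the CNI configuration and is only illustrative.

```
$ podman network inspect podman                        # default JSON output
$ podman network inspect --format '{{.name}}' podman
```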
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index e27062255..24604c055 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -4,13 +4,12 @@ import (
"encoding/json"
"fmt"
"html/template"
- "io"
"os"
"strings"
-
- "github.com/containers/libpod/cmd/podman/validate"
+ "text/tabwriter"
"github.com/containers/libpod/cmd/podman/registry"
+ "github.com/containers/libpod/cmd/podman/validate"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/network"
"github.com/spf13/cobra"
@@ -41,13 +40,14 @@ var (
func networkListFlags(flags *pflag.FlagSet) {
// TODO enable filters based on something
//flags.StringSliceVarP(&networklistCommand.Filter, "filter", "f", []string{}, "Pause all running containers")
- flags.StringVarP(&networkListOptions.Format, "format", "f", "", "Pretty-print containers to JSON or using a Go template")
+ flags.StringVarP(&networkListOptions.Format, "format", "f", "", "Pretty-print networks to JSON or using a Go template")
flags.BoolVarP(&networkListOptions.Quiet, "quiet", "q", false, "display only names")
+ flags.StringVarP(&networkListOptions.Filter, "filter", "", "", "Provide filter values (e.g. 'name=podman')")
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networklistCommand,
Parent: networkCmd,
})
@@ -57,10 +57,17 @@ func init() {
func networkList(cmd *cobra.Command, args []string) error {
var (
- w io.Writer = os.Stdout
nlprs []NetworkListPrintReports
)
+ // validate the filter pattern.
+ if len(networkListOptions.Filter) > 0 {
+ tokens := strings.Split(networkListOptions.Filter, "=")
+ if len(tokens) != 2 {
+ return fmt.Errorf("invalid filter syntax : %s", networkListOptions.Filter)
+ }
+ }
+
responses, err := registry.ContainerEngine().NetworkList(registry.Context(), networkListOptions)
if err != nil {
return err
@@ -71,7 +78,7 @@ func networkList(cmd *cobra.Command, args []string) error {
return quietOut(responses)
}
- if networkListOptions.Format == "json" {
+ if strings.ToLower(networkListOptions.Format) == "json" {
return jsonOut(responses)
}
@@ -95,13 +102,11 @@ func networkList(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
+ w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
if err := tmpl.Execute(w, nlprs); err != nil {
return err
}
- if flusher, ok := w.(interface{ Flush() error }); ok {
- return flusher.Flush()
- }
- return nil
+ return w.Flush()
}
func quietOut(responses []*entities.NetworkListReport) error {
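A usage sketch of the listing changes above: --filter accepts a single key=value pair, the "json" format check is now case-insensitive, and table output goes through a tabwriter. Values are illustrative.

```
$ podman network ls --filter name=podman
$ podman network ls --format JSON      # treated the same as --format json
```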
diff --git a/cmd/podman/networks/network.go b/cmd/podman/networks/network.go
index 56dd390ea..7f38cd2cd 100644
--- a/cmd/podman/networks/network.go
+++ b/cmd/podman/networks/network.go
@@ -20,7 +20,7 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkCmd,
})
}
diff --git a/cmd/podman/networks/rm.go b/cmd/podman/networks/rm.go
index dc1eb9909..34d57f6ef 100644
--- a/cmd/podman/networks/rm.go
+++ b/cmd/podman/networks/rm.go
@@ -35,7 +35,7 @@ func networkRmFlags(flags *pflag.FlagSet) {
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: networkrmCommand,
Parent: networkCmd,
})
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index e24cdef98..62b5b849e 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -3,6 +3,7 @@ package pods
import (
"context"
"fmt"
+ "io/ioutil"
"os"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/libpod/cmd/podman/validate"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/errorhandling"
- createconfig "github.com/containers/libpod/pkg/spec"
"github.com/containers/libpod/pkg/specgen"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
@@ -60,7 +60,7 @@ func init() {
flags.StringVarP(&createOptions.Name, "name", "n", "", "Assign a name to the pod")
flags.StringVarP(&createOptions.Hostname, "hostname", "", "", "Set a hostname to the pod")
flags.StringVar(&podIDFile, "pod-id-file", "", "Write the pod ID to the file")
- flags.StringVar(&share, "share", createconfig.DefaultKernelNamespaces, "A comma delimited list of kernel namespaces the pod will share")
+ flags.StringVar(&share, "share", specgen.DefaultKernelNamespaces, "A comma delimited list of kernel namespaces the pod will share")
flags.SetNormalizeFunc(aliasNetworkFlag)
}
@@ -147,6 +147,11 @@ func create(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
+ if len(podIDFile) > 0 {
+ if err = ioutil.WriteFile(podIDFile, []byte(response.Id), 0644); err != nil {
+ return errors.Wrapf(err, "failed to write pod ID to file %q", podIDFile)
+ }
+ }
fmt.Println(response.Id)
return nil
}
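A short sketch of the new --pod-id-file behavior above, which writes the created pod's ID to the given path in addition to printing it; the names are illustrative.

```
$ podman pod create --name web --pod-id-file /tmp/web.pod-id
$ cat /tmp/web.pod-id
```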
diff --git a/cmd/podman/pods/top.go b/cmd/podman/pods/top.go
index 9cf2bd525..ba1efb638 100644
--- a/cmd/podman/pods/top.go
+++ b/cmd/podman/pods/top.go
@@ -9,17 +9,15 @@ import (
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
- "github.com/containers/psgo"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
- topDescription = fmt.Sprintf(`Specify format descriptors to alter the output.
+ topDescription = `Specify format descriptors to alter the output.
- You may run "podman pod top -l pid pcpu seccomp" to print the process ID, the CPU percentage and the seccomp mode of each process of the latest pod.
- Format Descriptors:
- %s`, strings.Join(psgo.ListDescriptors(), ","))
+ You may run "podman pod top -l pid pcpu seccomp" to print the process ID, the CPU percentage and the seccomp mode of each process of the latest pod.`
topOptions = entities.PodTopOptions{}
@@ -43,6 +41,12 @@ func init() {
Parent: podCmd,
})
+ descriptors, err := util.GetContainerPidInformationDescriptors()
+ if err == nil {
+ topDescription = fmt.Sprintf("%s\n\n Format Descriptors:\n %s", topDescription, strings.Join(descriptors, ","))
+ topCommand.Long = topDescription
+ }
+
flags := topCommand.Flags()
flags.SetInterspersed(false)
flags.BoolVar(&topOptions.ListDescriptors, "list-descriptors", false, "")
@@ -56,7 +60,11 @@ func init() {
func top(cmd *cobra.Command, args []string) error {
if topOptions.ListDescriptors {
- fmt.Println(strings.Join(psgo.ListDescriptors(), "\n"))
+ descriptors, err := util.GetContainerPidInformationDescriptors()
+ if err != nil {
+ return err
+ }
+ fmt.Println(strings.Join(descriptors, "\n"))
return nil
}
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 7d6f6f823..dffd9b534 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
- "log/syslog"
"os"
"path"
"runtime/pprof"
@@ -17,7 +16,6 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- logrusSyslog "github.com/sirupsen/logrus/hooks/syslog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -191,21 +189,6 @@ func loggingHook() {
}
}
-func syslogHook() {
- if !useSyslog {
- return
- }
-
- hook, err := logrusSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
- if err != nil {
- fmt.Fprint(os.Stderr, "Failed to initialize syslog hook: "+err.Error())
- os.Exit(1)
- }
- if err == nil {
- logrus.AddHook(hook)
- }
-}
-
func rootFlags(opts *entities.PodmanConfig, flags *pflag.FlagSet) {
// V2 flags
flags.StringVarP(&opts.Uri, "remote", "r", registry.DefaultAPIAddress(), "URL to access Podman service")
diff --git a/cmd/podman/syslog_linux.go b/cmd/podman/syslog_linux.go
new file mode 100644
index 000000000..ac7bbfe0f
--- /dev/null
+++ b/cmd/podman/syslog_linux.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+ "fmt"
+ "log/syslog"
+ "os"
+
+ "github.com/sirupsen/logrus"
+ logrusSyslog "github.com/sirupsen/logrus/hooks/syslog"
+)
+
+func syslogHook() {
+ if !useSyslog {
+ return
+ }
+
+ hook, err := logrusSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+ if err != nil {
+ fmt.Fprint(os.Stderr, "Failed to initialize syslog hook: "+err.Error())
+ os.Exit(1)
+ }
+ if err == nil {
+ logrus.AddHook(hook)
+ }
+}
diff --git a/cmd/podman/syslog_unsupported.go b/cmd/podman/syslog_unsupported.go
new file mode 100644
index 000000000..3765d96b9
--- /dev/null
+++ b/cmd/podman/syslog_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func syslogHook() {
+ if !useSyslog {
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "Logging to Syslog is not supported on Windows")
+ os.Exit(1)
+}
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 7caa8e39a..8fe035209 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -37,7 +37,7 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: dfSystemCommand,
Parent: systemCmd,
})
diff --git a/cmd/podman/system/events.go b/cmd/podman/system/events.go
index 6aae62dc0..27e80138e 100644
--- a/cmd/podman/system/events.go
+++ b/cmd/podman/system/events.go
@@ -5,6 +5,7 @@ import (
"context"
"html/template"
"os"
+ "strings"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/registry"
@@ -54,6 +55,9 @@ func eventsCmd(cmd *cobra.Command, args []string) error {
eventsError error
tmpl *template.Template
)
+ if strings.Join(strings.Fields(eventFormat), "") == "{{json.}}" {
+ eventFormat = formats.JSONString
+ }
if eventFormat != formats.JSONString {
tmpl, err = template.New("events").Parse(eventFormat)
if err != nil {
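A usage sketch of the format normalization above: any spacing variant of '{{json .}}' is now treated as the built-in JSON formatter rather than parsed as a plain Go template.

```
$ podman events --format '{{json .}}'
$ podman events --format '{{ json . }}'    # normalized to the same JSON output
```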
diff --git a/cmd/podman/system/reset.go b/cmd/podman/system/reset.go
index 22ddc7529..6caa91690 100644
--- a/cmd/podman/system/reset.go
+++ b/cmd/podman/system/reset.go
@@ -26,10 +26,8 @@ var (
Long: systemResetDescription,
Run: reset,
}
-)
-var (
- systemResetOptions entities.SystemResetOptions
+ forceFlag bool
)
func init() {
@@ -39,12 +37,12 @@ func init() {
Parent: systemCmd,
})
flags := systemResetCommand.Flags()
- flags.BoolVarP(&systemResetOptions.Force, "force", "f", false, "Do not prompt for confirmation")
+ flags.BoolVarP(&forceFlag, "force", "f", false, "Do not prompt for confirmation")
}
func reset(cmd *cobra.Command, args []string) {
// Prompt for confirmation if --force is not set
- if !systemResetOptions.Force {
+ if !forceFlag {
reader := bufio.NewReader(os.Stdin)
fmt.Print(`
WARNING! This will remove:
@@ -74,7 +72,7 @@ Are you sure you want to continue? [y/N] `)
}
defer engine.Shutdown(registry.Context())
- if err := engine.Reset(registry.Context(), systemResetOptions); err != nil {
+ if err := engine.Reset(registry.Context()); err != nil {
fmt.Println(err)
os.Exit(125)
}
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index 552c72f79..b5dd2f2aa 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -1,3 +1,5 @@
+// +build linux
+
package system
import (
diff --git a/cmd/podman/system/version.go b/cmd/podman/system/version.go
index 50bd81368..92a3225b6 100644
--- a/cmd/podman/system/version.go
+++ b/cmd/podman/system/version.go
@@ -6,8 +6,8 @@ import (
"os"
"strings"
"text/tabwriter"
- "time"
+ "github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/validate"
"github.com/containers/libpod/libpod/define"
@@ -52,6 +52,17 @@ func version(cmd *cobra.Command, args []string) error {
if !strings.HasSuffix(versionFormat, "\n") {
versionFormat += "\n"
}
+ out := formats.StdoutTemplate{Output: versions, Template: versionFormat}
+ err := out.Out()
+ if err != nil {
+ // On Failure, assume user is using older version of podman version --format and check client
+ versionFormat = strings.Replace(versionFormat, ".Server.", ".", 1)
+ out = formats.StdoutTemplate{Output: versions.Client, Template: versionFormat}
+ if err1 := out.Out(); err1 != nil {
+ return err
+ }
+ }
+ return nil
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -74,15 +85,11 @@ func version(cmd *cobra.Command, args []string) error {
func formatVersion(writer io.Writer, version *define.Version) {
fmt.Fprintf(writer, "Version:\t%s\n", version.Version)
- fmt.Fprintf(writer, "RemoteAPI Version:\t%d\n", version.RemoteAPIVersion)
+ fmt.Fprintf(writer, "API Version:\t%d\n", version.APIVersion)
fmt.Fprintf(writer, "Go Version:\t%s\n", version.GoVersion)
if version.GitCommit != "" {
fmt.Fprintf(writer, "Git Commit:\t%s\n", version.GitCommit)
}
- // Prints out the build time in readable format
- if version.Built != 0 {
- fmt.Fprintf(writer, "Built:\t%s\n", time.Unix(version.Built, 0).Format(time.ANSIC))
- }
-
+ fmt.Fprintf(writer, "Built:\t%s\n", version.BuiltTime)
fmt.Fprintf(writer, "OS/Arch:\t%s\n", version.OsArch)
}
diff --git a/completions/bash/podman b/completions/bash/podman
index 1e29a2e30..9baf7901e 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -702,6 +702,27 @@ __podman_images() {
__podman_q images $images_args | awk "$awk_script" | grep -v '<none>$'
}
+_podman_auto_update() {
+ local options_with_args="
+ --authfile
+ "
+
+ local boolean_options="
+ --help
+ -h
+ "
+
+ _complete_ "$options_with_args" "$boolean_options"
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_volume_names
+ ;;
+ esac
+}
+
# __podman_complete_volumes applies completion of volumes based on the current
# value of `$cur` or the value of the optional first option `--cur`, if given.
__podman_complete_volumes() {
@@ -1003,6 +1024,8 @@ _podman_network_inspect() {
local boolean_options="
--help
-h
+ --format
+ -f
"
_complete_ "$options_with_args" "$boolean_options"
@@ -1021,6 +1044,9 @@ _podman_network_ls() {
-h
--quiet
-q
+ --format
+ -f
+ --filter
"
_complete_ "$options_with_args" "$boolean_options"
diff --git a/contrib/cirrus/container_test.sh b/contrib/cirrus/container_test.sh
index 4624868f1..bf0a0d3f1 100644
--- a/contrib/cirrus/container_test.sh
+++ b/contrib/cirrus/container_test.sh
@@ -126,7 +126,6 @@ if [ $install -eq 1 ]; then
make TAGS="${TAGS}" install.bin PREFIX=/usr ETCDIR=/etc
make TAGS="${TAGS}" install.man PREFIX=/usr ETCDIR=/etc
make TAGS="${TAGS}" install.cni PREFIX=/usr ETCDIR=/etc
- make TAGS="${TAGS}" install.config PREFIX=/usr ETCDIR=/etc
make TAGS="${TAGS}" install.systemd PREFIX=/usr ETCDIR=/etc
fi
diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh
index 6341bcb4a..0f2b2ab7e 100755
--- a/contrib/cirrus/integration_test.sh
+++ b/contrib/cirrus/integration_test.sh
@@ -50,7 +50,6 @@ case "$SPECIALMODE" in
none)
make
make install PREFIX=/usr ETCDIR=/etc
- make install.config PREFIX=/usr
make test-binaries
if [[ "$TEST_REMOTE_CLIENT" == "true" ]]
then
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 750aec3b6..cc5a3ffa7 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -63,11 +63,12 @@ CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-$RANDOM$(date +%s)} # must be short and uniq
PACKER_VER="1.4.2"
# CSV of cache-image names to build (see $PACKER_BASE/libpod_images.json)
-# Base-images rarely change, define them here so they're out of the way.
-export PACKER_BUILDS="${PACKER_BUILDS:-ubuntu-18,ubuntu-19,fedora-32,fedora-31}"
-# Manually produced base-image names (see $SCRIPT_BASE/README.md)
-export UBUNTU_BASE_IMAGE="ubuntu-1910-eoan-v20200211"
-export PRIOR_UBUNTU_BASE_IMAGE="ubuntu-1804-bionic-v20200218"
+# List of cache images to build for 'CI:IMG' mode via build_vm_images.sh
+# Exists to support manual single-image building in case of emergency
+export PACKER_BUILDS="${PACKER_BUILDS:-ubuntu-20,ubuntu-19,fedora-32,fedora-31}"
+# Google cloud provides these, we just make copies (see $SCRIPT_BASE/README.md) for use
+export UBUNTU_BASE_IMAGE="ubuntu-2004-focal-v20200506"
+export PRIOR_UBUNTU_BASE_IMAGE="ubuntu-1910-eoan-v20200211"
# Manually produced base-image names (see $SCRIPT_BASE/README.md)
export FEDORA_BASE_IMAGE="fedora-cloud-base-32-1-6-1588257430"
export PRIOR_FEDORA_BASE_IMAGE="fedora-cloud-base-31-1-9-1588257430"
@@ -98,7 +99,6 @@ ROOTLESS_ENV_RE='(CIRRUS_.+)|(ROOTLESS_.+)|(.+_IMAGE.*)|(.+_BASE)|(.*DIRPATH)|(.
SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(GC[EP]..+)|(SSH)'
SPECIALMODE="${SPECIALMODE:-none}"
-MOD_LIBPOD_CONF="${MOD_LIBPOD_CONF:false}"
TEST_REMOTE_CLIENT="${TEST_REMOTE_CLIENT:-false}"
export CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman}
diff --git a/contrib/cirrus/packer/libpod_images.yml b/contrib/cirrus/packer/libpod_images.yml
index e33ad775e..754626a2e 100644
--- a/contrib/cirrus/packer/libpod_images.yml
+++ b/contrib/cirrus/packer/libpod_images.yml
@@ -29,7 +29,7 @@ sensitive-variables:
builders:
# v----- is a YAML anchor, allows referencing this object by name (below)
- &gce_hosted_image
- name: 'ubuntu-19'
+ name: 'ubuntu-20'
type: 'googlecompute'
image_name: '{{build_name}}{{user `BUILT_IMAGE_SUFFIX`}}'
image_family: '{{build_name}}-cache'
@@ -46,7 +46,7 @@ builders:
# v----- is a YAML alias, allows partial re-use of the anchor object
- <<: *gce_hosted_image
- name: 'ubuntu-18'
+ name: 'ubuntu-19'
source_image: '{{user `PRIOR_UBUNTU_BASE_IMAGE`}}'
source_image_family: 'prior-ubuntu-base'
diff --git a/contrib/cirrus/packer/ubuntu_packaging.sh b/contrib/cirrus/packer/ubuntu_packaging.sh
index b57bc95e9..fd0280230 100644
--- a/contrib/cirrus/packer/ubuntu_packaging.sh
+++ b/contrib/cirrus/packer/ubuntu_packaging.sh
@@ -26,12 +26,6 @@ source /usr/share/automation/environment
$LILTO ooe.sh $SUDOAPTADD ppa:criu/ppa
-# Install newer version of golang
-if [[ "$OS_RELEASE_VER" -eq "18" ]]
-then
- $LILTO ooe.sh $SUDOAPTADD ppa:longsleep/golang-backports
-fi
-
echo "Configuring/Instaling deps from Open build server"
VERSION_ID=$(source /etc/os-release; echo $VERSION_ID)
echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_$VERSION_ID/ /" \
@@ -45,7 +39,9 @@ INSTALL_PACKAGES=(\
autoconf
automake
bash-completion
+ bats
bison
+ btrfs-progs
build-essential
buildah
bzip2
@@ -60,6 +56,7 @@ INSTALL_PACKAGES=(\
e2fslibs-dev
emacs-nox
file
+ fuse3
gawk
gcc
gettext
@@ -71,11 +68,13 @@ INSTALL_PACKAGES=(\
jq
libaio-dev
libapparmor-dev
+ libbtrfs-dev
libcap-dev
libdevmapper-dev
libdevmapper1.02.1
libfuse-dev
libfuse2
+ libfuse3-dev
libglib2.0-dev
libgpgme11-dev
liblzma-dev
@@ -99,8 +98,6 @@ INSTALL_PACKAGES=(\
podman
protobuf-c-compiler
protobuf-compiler
- python-future
- python-minimal
python-protobuf
python3-dateutil
python3-pip
@@ -118,29 +115,16 @@ INSTALL_PACKAGES=(\
vim
wget
xz-utils
- yum-utils
zip
zlib1g-dev
)
-if [[ $OS_RELEASE_VER -ge 19 ]]
-then
- INSTALL_PACKAGES+=(\
- bats
- btrfs-progs
- fuse3
- libbtrfs-dev
- libfuse3-dev
- )
-else
- echo "Downloading version of bats with fix for a \$IFS related bug in 'run' command"
- cd /tmp
- BATS_URL='http://launchpadlibrarian.net/438140887/bats_1.1.0+git104-g1c83a1b-1_all.deb'
- curl -L -O "$BATS_URL"
- cd -
+# These aren't resolvable on Ubuntu 20
+if [[ "$OS_RELEASE_VER" -le 19 ]]; then
INSTALL_PACKAGES+=(\
- /tmp/$(basename $BATS_URL)
- btrfs-tools
+ python-future
+ python-minimal
+ yum-utils
)
fi
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 756240444..945b33909 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -76,12 +76,6 @@ case "$CG_FS_TYPE" in
X=$(echo "export OCI_RUNTIME=/usr/bin/crun" | \
tee -a /etc/environment) && eval "$X" && echo "$X"
- if [[ "$MOD_LIBPOD_CONF" == "true" ]]; then
- warn "Updating runtime setting in repo. copy of libpod.conf"
- sed -i -r -e 's/^runtime = "runc"/runtime = "crun"/' $GOSRC/libpod.conf
- git diff $GOSRC/libpod.conf
- fi
-
if [[ "$OS_RELEASE_ID" == "fedora" ]]; then
warn "Upgrading to the latest crun"
# Normally not something to do for stable testing
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 1dfbdf208..e4415c291 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -80,13 +80,14 @@ BuildRequires: libselinux-devel
BuildRequires: pkgconfig
BuildRequires: make
BuildRequires: systemd-devel
-Requires: runc
Requires: skopeo-containers
Requires: containernetworking-plugins >= 0.6.0-3
Requires: iptables
%if 0%{?rhel} <= 7
Requires: container-selinux
%else
+Requires: oci-runtime
+Recommends: crun
Recommends: container-selinux
Recommends: slirp4netns
Recommends: fuse-overlayfs
@@ -423,10 +424,6 @@ PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{build
mv pkg/hooks/README.md pkg/hooks/README-hooks.md
-# install libpod.conf
-install -dp %{buildroot}%{_datadir}/containers
-install -p -m 644 %{repo}.conf %{buildroot}%{_datadir}/containers
-
# install conmon
install -dp %{buildroot}%{_libexecdir}/%{name}
install -p -m 755 conmon/bin/conmon %{buildroot}%{_libexecdir}/%{name}
@@ -506,7 +503,6 @@ export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath}
%{_datadir}/zsh/site-functions/*
%{_libexecdir}/%{name}/conmon
%config(noreplace) %{_sysconfdir}/cni/net.d/87-%{name}-bridge.conflist
-%{_datadir}/containers/%{repo}.conf
%{_unitdir}/io.podman.service
%{_unitdir}/io.podman.socket
%{_usr}/lib/systemd/user/io.podman.service
diff --git a/docs/source/markdown/links/podman-image-diff.1 b/docs/source/markdown/links/podman-image-diff.1
new file mode 100644
index 000000000..ac4881f98
--- /dev/null
+++ b/docs/source/markdown/links/podman-image-diff.1
@@ -0,0 +1 @@
+.so man1/podman-diff.1
diff --git a/docs/source/markdown/links/podman-image-search.1 b/docs/source/markdown/links/podman-image-search.1
new file mode 100644
index 000000000..73e70d3e3
--- /dev/null
+++ b/docs/source/markdown/links/podman-image-search.1
@@ -0,0 +1 @@
+.so man1/podman-search.1
diff --git a/docs/source/markdown/podman-auto-update.1.md b/docs/source/markdown/podman-auto-update.1.md
index 93ad22f76..435a767c1 100644
--- a/docs/source/markdown/podman-auto-update.1.md
+++ b/docs/source/markdown/podman-auto-update.1.md
@@ -13,6 +13,8 @@ If the label is present and set to "image", Podman reaches out to the correspond
An image is considered updated if the digest in the local storage is different than the one of the remote image.
If an image must be updated, Podman pulls it down and restarts the systemd unit executing the container.
+If "io.containers.autoupdate.authfile" label is present, Podman reaches out to corresponding authfile when pulling images.
+
At container-creation time, Podman looks up the "PODMAN_SYSTEMD_UNIT" environment variables and stores it verbatim in the container's label.
This variable is now set by all systemd units generated by `podman-generate-systemd` and is set to `%n` (i.e., the name of systemd unit starting the container).
This data is then being used in the auto-update sequence to instruct systemd (via DBUS) to restart the unit and hence to restart the container.
@@ -21,11 +23,23 @@ Note that `podman auto-update` relies on systemd and requires a fully-qualified
This enforcement is necessary to know which image to actually check and pull.
If an image ID was used, Podman would not know which image to check/pull anymore.
+## OPTIONS
+
+**--authfile**=*path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. (Not available for remote commands)
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
## EXAMPLES
```
# Start a container
-$ podman run -d busybox:latest top
+$ podman run --label "io.containers.autoupdate=image" \
+ --label "io.containers.autoupdate.autfile=/some/authfile.json" \
+ -d busybox:latest top
bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
# Generate a systemd unit for this container
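A hedged sketch of the authfile handling described in the OPTIONS section above; the path is illustrative, and the environment variable is an alternative to the flag.

```
$ podman auto-update --authfile ~/auth.json
$ REGISTRY_AUTH_FILE=~/auth.json podman auto-update    # same effect via the environment variable
```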
diff --git a/docs/source/markdown/podman-container-cleanup.1.md b/docs/source/markdown/podman-container-cleanup.1.md
index 66a6cff62..a200c2c36 100644
--- a/docs/source/markdown/podman-container-cleanup.1.md
+++ b/docs/source/markdown/podman-container-cleanup.1.md
@@ -16,6 +16,13 @@ Sometimes container's mount points and network stacks can remain if the podman c
Cleanup all containers.
+**--exec**=_session_
+
+Clean up an exec session for a single container.
+Can only be specified if a single container is being cleaned up (conflicts with **--all** as such).
+If **--rm** is not specified, temporary files for the exec session will be cleaned up; if it is, the exec session will be removed from the container.
+Conflicts with **--rmi** as the container is not being cleaned up so the image cannot be removed.
+
**--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
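A brief sketch of the --exec option documented above; the session ID is a placeholder and only a single container may be given.

```
$ podman container cleanup --exec <exec-session-id> mycontainer
$ podman container cleanup --exec <exec-session-id> --rm mycontainer    # also removes the session from the container
```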
diff --git a/docs/source/markdown/podman-events.1.md b/docs/source/markdown/podman-events.1.md
index bb1923574..a05047684 100644
--- a/docs/source/markdown/podman-events.1.md
+++ b/docs/source/markdown/podman-events.1.md
@@ -142,7 +142,7 @@ $ sudo podman events --since 5m
Show Podman events in JSON Lines format
```
-events --format json
+$ podman events --format json
{"ID":"683b0909d556a9c02fa8cd2b61c3531a965db42158627622d1a67b391964d519","Image":"localhost/myshdemo:latest","Name":"agitated_diffie","Status":"cleanup","Time":"2019-04-27T22:47:00.849932843-04:00","Type":"container"}
{"ID":"a0f8ab051bfd43f9c5141a8a2502139707e4b38d98ac0872e57c5315381e88ad","Image":"docker.io/library/alpine:latest","Name":"friendly_tereshkova","Status":"unmount","Time":"2019-04-28T13:43:38.063017276-04:00","Type":"container"}
```
diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index 1bd10f9ba..b24a1f8aa 100644
--- a/docs/source/markdown/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -13,6 +13,10 @@ podman\-exec - Execute a command in a running container
## OPTIONS
+**--detach**
+
+Start the exec session, but do not attach to it. The command will run in the background and the exec session will be automatically removed when it completes. The **podman exec** command will print the ID of the exec session and exit immediately after it starts.
+
**--detach-keys**=*sequence*
Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
diff --git a/docs/source/markdown/podman-info.1.md b/docs/source/markdown/podman-info.1.md
index d9da4c3f8..24ab97c91 100644
--- a/docs/source/markdown/podman-info.1.md
+++ b/docs/source/markdown/podman-info.1.md
@@ -30,128 +30,212 @@ Run podman info with plain text response:
```
$ podman info
host:
- BuildahVersion: 1.4-dev
- Conmon:
- package: Unknown
- path: /usr/libexec/podman/conmon
- version: 'conmon version 1.12.0-dev, commit: d724f3d54ad2d95b6de741085d4990190ebfd7ff'
- Distribution:
- distribution: fedora
- version: "28"
- MemFree: 1271083008
- MemTotal: 33074233344
- OCIRuntime:
- package: runc-1.0.0-51.dev.gitfdd8055.fc28.x86_64
- path: /usr/bin/runc
- version: 'runc version spec: 1.0.0'
- SwapFree: 34309664768
- SwapTotal: 34359734272
arch: amd64
+ buildahVersion: 1.15.0
+ cgroupVersion: v1
+ conmon:
+ package: conmon-2.0.16-2.fc32.x86_64
+ path: /usr/bin/conmon
+ version: 'conmon version 2.0.16, commit: 1044176f7dd177c100779d1c63931d6022e419bd'
cpus: 8
+ distribution:
+ distribution: fedora
+ version: "32"
+ eventLogger: file
hostname: localhost.localdomain
- kernel: 4.18.7-200.fc28.x86_64
+ idMappings:
+ gidmap:
+ - container_id: 0
+ host_id: 3267
+ size: 1
+ - container_id: 1
+ host_id: 100000
+ size: 65536
+ uidmap:
+ - container_id: 0
+ host_id: 3267
+ size: 1
+ - container_id: 1
+ host_id: 100000
+ size: 65536
+ kernel: 5.6.11-300.fc32.x86_64
+ linkmode: dynamic
+ memFree: 1401929728
+ memTotal: 16416161792
+ ociRuntime:
+ name: runc
+ package: containerd.io-1.2.10-3.2.fc31.x86_64
+ path: /usr/bin/runc
+ version: |-
+ runc version 1.0.0-rc8+dev
+ commit: 3e425f80a8c931f88e6d94a8c831b9d5aa481657
+ spec: 1.0.1-dev
os: linux
- uptime: 218h 49m 33.66s (Approximately 9.08 days)
+ rootless: true
+ slirp4netns:
+ executable: /bin/slirp4netns
+ package: slirp4netns-1.0.0-1.fc32.x86_64
+ version: |-
+ slirp4netns version 1.0.0
+ commit: a3be729152a33e692cd28b52f664defbf2e7810a
+ libslirp: 4.2.0
+ swapFree: 8291610624
+ swapTotal: 8296329216
+ uptime: 52h 29m 39.78s (Approximately 2.17 days)
registries:
- docker.io:
- Blocked: true
- Insecure: true
- Location: docker.io
- MirrorByDigestOnly: false
- Mirrors:
- - Insecure: true
- Location: example2.io/example/ubi8-minimal
- Prefix: docker.io
- redhat.com:
- Blocked: false
- Insecure: false
- Location: registry.access.redhat.com/ubi8
- MirrorByDigestOnly: true
- Mirrors:
- - Insecure: false
- Location: example.io/example/ubi8-minimal
- - Insecure: true
- Location: example3.io/example/ubi8-minimal
- Prefix: redhat.com
+ search:
+ - registry.fedoraproject.org
+ - registry.access.redhat.com
+ - registry.centos.org
+ - docker.io
store:
- ConfigFile: /etc/containers/storage.conf
- ContainerStore:
- number: 37
- GraphDriverName: overlay
- GraphOptions:
- - overlay.mountopt=nodev
- - overlay.override_kernel_check=true
- GraphRoot: /var/lib/containers/storage
- GraphStatus:
+ configFile: /home/dwalsh/.config/containers/storage.conf
+ containerStore:
+ number: 2
+ paused: 0
+ running: 0
+ stopped: 2
+ graphDriverName: overlay
+ graphOptions:
+ overlay.mount_program:
+ Executable: /home/dwalsh/bin/fuse-overlayfs
+ Package: Unknown
+ Version: |-
+ fusermount3 version: 3.9.1
+ fuse-overlayfs: version 0.7.2
+ FUSE library version 3.9.1
+ using FUSE kernel interface version 7.31
+ graphRoot: /home/dwalsh/.local/share/containers/storage
+ graphStatus:
Backing Filesystem: extfs
- Native Overlay Diff: "true"
+ Native Overlay Diff: "false"
Supports d_type: "true"
- ImageStore:
- number: 17
- RunRoot: /var/run/containers/storage
-
+ Using metacopy: "false"
+ imageStore:
+ number: 7
+ runRoot: /run/user/3267/containers
+ volumePath: /home/dwalsh/.local/share/containers/storage/volumes
+version:
+ Built: 1589899246
+ BuiltTime: Tue May 19 10:40:46 2020
+ GitCommit: c3678ce3289f4195f3f16802411e795c6a587c9f-dirty
+ GoVersion: go1.14.2
+ OsArch: linux/amd64
+ APIVersion: 1
+ Version: 2.0.0
```
Run podman info with JSON formatted response:
```
{
- "host": {
- "BuildahVersion": "1.4-dev",
- "Conmon": {
- "package": "Unknown",
- "path": "/usr/libexec/podman/conmon",
- "version": "conmon version 1.12.0-dev, commit: d724f3d54ad2d95b6de741085d4990190ebfd7ff"
- },
- "Distribution": {
- "distribution": "fedora",
- "version": "28"
+ "host": {
+ "arch": "amd64",
+ "buildahVersion": "1.15.0",
+ "cgroupVersion": "v1",
+ "conmon": {
+ "package": "conmon-2.0.16-2.fc32.x86_64",
+ "path": "/usr/bin/conmon",
+ "version": "conmon version 2.0.16, commit: 1044176f7dd177c100779d1c63931d6022e419bd"
+ },
+ "cpus": 8,
+ "distribution": {
+ "distribution": "fedora",
+ "version": "32"
+ },
+ "eventLogger": "file",
+ "hostname": "localhost.localdomain",
+ "idMappings": {
+ "gidmap": [
+ {
+ "container_id": 0,
+ "host_id": 3267,
+ "size": 1
},
- "MemFree": 1204109312,
- "MemTotal": 33074233344,
- "OCIRuntime": {
- "package": "runc-1.0.0-51.dev.gitfdd8055.fc28.x86_64",
- "path": "/usr/bin/runc",
- "version": "runc version spec: 1.0.0"
+ {
+ "container_id": 1,
+ "host_id": 100000,
+ "size": 65536
+ }
+ ],
+ "uidmap": [
+ {
+ "container_id": 0,
+ "host_id": 3267,
+ "size": 1
},
- "SwapFree": 34309664768,
- "SwapTotal": 34359734272,
- "arch": "amd64",
- "cpus": 8,
- "hostname": "localhost.localdomain",
- "kernel": "4.18.7-200.fc28.x86_64",
- "os": "linux",
- "uptime": "218h 50m 35.02s (Approximately 9.08 days)"
+ {
+ "container_id": 1,
+ "host_id": 100000,
+ "size": 65536
+ }
+ ]
},
- "insecure registries": {
- "registries": []
+ "kernel": "5.6.11-300.fc32.x86_64",
+ "memFree": 1380356096,
+ "memTotal": 16416161792,
+ "ociRuntime": {
+ "name": "runc",
+ "package": "containerd.io-1.2.10-3.2.fc31.x86_64",
+ "path": "/usr/bin/runc",
+ "version": "runc version 1.0.0-rc8+dev\ncommit: 3e425f80a8c931f88e6d94a8c831b9d5aa481657\nspec: 1.0.1-dev"
},
- "registries": {
- "registries": [
- "quay.io",
- "registry.fedoraproject.org",
- "docker.io",
- "registry.access.redhat.com"
- ]
+ "os": "linux",
+ "rootless": true,
+ "slirp4netns": {
+ "executable": "/bin/slirp4netns",
+ "package": "slirp4netns-1.0.0-1.fc32.x86_64",
+ "version": "slirp4netns version 1.0.0\ncommit: a3be729152a33e692cd28b52f664defbf2e7810a\nlibslirp: 4.2.0"
},
- "store": {
- "ContainerStore": {
- "number": 37
- },
- "GraphDriverName": "overlay",
- "GraphOptions": [
- "overlay.mountopt=nodev",
- "overlay.override_kernel_check=true"
- ],
- "GraphRoot": "/var/lib/containers/storage",
- "GraphStatus": {
- "Backing Filesystem": "extfs",
- "Native Overlay Diff": "true",
- "Supports d_type": "true"
- },
- "ImageStore": {
- "number": 17
- },
- "RunRoot": "/var/run/containers/storage"
- }
+ "swapFree": 8291610624,
+ "swapTotal": 8296329216,
+ "uptime": "52h 27m 39.38s (Approximately 2.17 days)",
+ "linkmode": "dynamic"
+ },
+ "store": {
+ "configFile": "/home/dwalsh/.config/containers/storage.conf",
+ "containerStore": {
+ "number": 2,
+ "paused": 0,
+ "running": 0,
+ "stopped": 2
+ },
+ "graphDriverName": "overlay",
+ "graphOptions": {
+ "overlay.mount_program": {
+ "Executable": "/home/dwalsh/bin/fuse-overlayfs",
+ "Package": "Unknown",
+ "Version": "fusermount3 version: 3.9.1\nfuse-overlayfs: version 0.7.2\nFUSE library version 3.9.1\nusing FUSE kernel interface version 7.31"
+}
+ },
+ "graphRoot": "/home/dwalsh/.local/share/containers/storage",
+ "graphStatus": {
+ "Backing Filesystem": "extfs",
+ "Native Overlay Diff": "false",
+ "Supports d_type": "true",
+ "Using metacopy": "false"
+ },
+ "imageStore": {
+ "number": 7
+ },
+ "runRoot": "/run/user/3267/containers",
+ "volumePath": "/home/dwalsh/.local/share/containers/storage/volumes"
+ },
+ "registries": {
+ "search": [
+ "registry.fedoraproject.org",
+ "registry.access.redhat.com",
+ "registry.centos.org",
+ "docker.io"
+]
+ },
+ "version": {
+ "APIVersion": 1,
+ "Version": "2.0.0",
+ "GoVersion": "go1.14.2",
+ "GitCommit": "c3678ce3289f4195f3f16802411e795c6a587c9f-dirty",
+ "BuiltTime": "Tue May 19 10:40:46 2020",
+ "Built": 1589899246,
+ "OsArch": "linux/amd64"
+ }
}
```
Run podman info and only get the registries information.
diff --git a/docs/source/markdown/podman-network-inspect.1.md b/docs/source/markdown/podman-network-inspect.1.md
index dfa7e4b0c..ca6875d18 100644
--- a/docs/source/markdown/podman-network-inspect.1.md
+++ b/docs/source/markdown/podman-network-inspect.1.md
@@ -9,6 +9,15 @@ podman\-network\-inspect - Displays the raw CNI network configuration for one or
## DESCRIPTION
Display the raw (JSON format) network configuration. This command is not available for rootless users.
+## OPTIONS
+**--quiet**, **-q**
+
+The `quiet` option will restrict the output to only the network names.
+
+**--format**, **-f**
+
+Pretty-print networks to JSON, or format the output using a Go template.
+
## EXAMPLE
Inspect the default podman network
@@ -43,6 +52,11 @@ Inspect the default podman network
]
```
+```
+# podman network inspect podman --format '{{(index .plugins 0).ipam.ranges}}'
+[[map[gateway:10.88.0.1 subnet:10.88.0.0/16]]]
+```
+
## SEE ALSO
podman(1), podman-network(1), podman-network-ls(1)
diff --git a/docs/source/markdown/podman-network-ls.1.md b/docs/source/markdown/podman-network-ls.1.md
index 46e424593..7b20cf5e0 100644
--- a/docs/source/markdown/podman-network-ls.1.md
+++ b/docs/source/markdown/podman-network-ls.1.md
@@ -12,7 +12,15 @@ Displays a list of existing podman networks. This command is not available for r
## OPTIONS
**--quiet**, **-q**
-The `quiet` option will restrict the output to only the network names
+The `quiet` option will restrict the output to only the network names.
+
+**--format**, **-f**
+
+Pretty-print networks to JSON, or format the output using a Go template.
+
+**--filter**
+
+Provide filter values (e.g. 'name=podman').
## EXAMPLE
@@ -36,6 +44,14 @@ outside
podman9
```
+Display the names of networks which include the portmap plugin
+```
+# podman network ls --filter plugin=portmap --format {{.Name}}
+podman
+podman2
+podman9
+```
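+
+List only the network names, a short sketch of the **--quiet** output (the names shown are illustrative):
+```
+# podman network ls --quiet
+podman
+podman2
+podman9
+```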
+
## SEE ALSO
podman(1), podman-network(1), podman-network-inspect(1)
diff --git a/docs/source/markdown/podman-version.1.md b/docs/source/markdown/podman-version.1.md
index 86c270e02..185e8e296 100644
--- a/docs/source/markdown/podman-version.1.md
+++ b/docs/source/markdown/podman-version.1.md
@@ -25,17 +25,18 @@ Change output format to "json" or a Go template.
A sample output of the `version` command:
```
$ podman version
-Version: 0.11.1
-Go Version: go1.11
-Git Commit: "8967a1d691ed44896b81ad48c863033f23c65eb0-dirty"
-Built: Thu Nov 8 22:35:40 2018
-OS/Arch: linux/amd64
+Version: 2.0.0
+API Version: 1
+Go Version: go1.14.2
+Git Commit: 4520664f63c3a7f9a80227715359e20069d95542
+Built: Tue May 19 10:48:59 2020
+OS/Arch: linux/amd64
```
Filtering out only the version:
```
$ podman version --format '{{.Client.Version}}'
-1.6.3
+2.0.0
```
## SEE ALSO
diff --git a/docs/tutorials/podman-derivative-api.md b/docs/tutorials/podman-derivative-api.md
index 065b0c4a9..8a1f40fc0 100644
--- a/docs/tutorials/podman-derivative-api.md
+++ b/docs/tutorials/podman-derivative-api.md
@@ -4,6 +4,20 @@
libpod today is a Golang library and a CLI. The choice of interface you make has advantages and disadvantages.
+Using the REST API
+---
+
+Advantages:
+
+ - Stable, versioned API
+ - Language-agnostic
+ - [Well-documented](http://docs.podman.io/en/latest/_static/api.html) API
+
+Disadvantages:
+
+ - Error handling is less verbose than with the Golang API
+ - May be slower
+
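+A minimal sketch of exercising the REST API from a shell, assuming a rootful
+Podman with the API service listening on its default socket (the hostname in
+the URL is ignored when talking over a Unix socket):
+
+```
+$ podman system service --time=0 &
+$ curl --unix-socket /run/podman/podman.sock http://localhost/_ping
+OK
+```
+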
Running as a subprocess
---
@@ -35,12 +49,12 @@ Disadvantages:
Varlink
---
-Some code exists for this; splits the difference. Future uncertain.
+The Varlink API is presently deprecated. We do not recommend adopting it for new projects.
Making the choice
---
A good question to ask first is: Do you want users to be able to use `podman` to manipulate the containers created by your project?
-If so, that makes it more likely that you want to run `podman` as a subprocess. If you want a separate image store and a fundamentally
+If so, that makes it more likely that you want to run `podman` as a subprocess or to use the HTTP API. If you want a separate image store and a fundamentally
different experience; if what you're doing with containers is quite different from those created by the `podman` CLI,
that may drive you towards vendoring.
diff --git a/go.mod b/go.mod
index aeb9e3110..aadce78c3 100644
--- a/go.mod
+++ b/go.mod
@@ -4,19 +4,20 @@ go 1.12
require (
github.com/BurntSushi/toml v0.3.1
+ github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
- github.com/containernetworking/plugins v0.8.5
+ github.com/containernetworking/plugins v0.8.6
github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9
- github.com/containers/common v0.11.1
- github.com/containers/conmon v2.0.14+incompatible
- github.com/containers/image/v5 v5.4.3
+ github.com/containers/common v0.11.2
+ github.com/containers/conmon v2.0.16+incompatible
+ github.com/containers/image/v5 v5.4.4
github.com/containers/psgo v1.5.0
- github.com/containers/storage v1.19.1
+ github.com/containers/storage v1.20.1
github.com/coreos/go-systemd/v22 v22.0.0
- github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
+ github.com/cri-o/ocicni v0.2.0
github.com/cyphar/filepath-securejoin v0.2.2
github.com/davecgh/go-spew v1.1.1
github.com/docker/distribution v2.7.1+incompatible
@@ -34,19 +35,19 @@ require (
github.com/hpcloud/tail v1.0.0
github.com/json-iterator/go v1.1.9
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
- github.com/onsi/ginkgo v1.12.0
- github.com/onsi/gomega v1.10.0
- github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/onsi/ginkgo v1.12.2
+ github.com/onsi/gomega v1.10.1
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc9
- github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.5.1
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/rootless-containers/rootlesskit v0.9.4
- github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
+ github.com/seccomp/containers-golang v0.4.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
@@ -57,12 +58,12 @@ require (
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.1.0
go.etcd.io/bbolt v1.3.4
- golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
- golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
+ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
- golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
- gopkg.in/yaml.v2 v2.2.8
- k8s.io/api v0.18.2
- k8s.io/apimachinery v0.18.2
+ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ gopkg.in/yaml.v2 v2.3.0
+ k8s.io/api v0.18.3
+ k8s.io/apimachinery v0.18.3
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
)
diff --git a/go.sum b/go.sum
index 6beb950ab..fc2907d70 100644
--- a/go.sum
+++ b/go.sum
@@ -10,13 +10,13 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -35,8 +35,9 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37 h1:uxxtrnACqI9zK4ENDMf0WpXfUsHP5V8liuq5QdgDISU=
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
@@ -52,9 +53,9 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
+github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
@@ -66,30 +67,29 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 h1:eUMd8hlGasYcg1tBqETZtxaW3a7EIxqY7Z1g65gcKQg=
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.5 h1:pCvEMrFf7yzJI8+/D/7jkvE96KD52b7/Eu+jpahihy8=
-github.com/containernetworking/plugins v0.8.5/go.mod h1:UZ2539umj8djuRQmBxuazHeJbYrLV8BSBejkk+she6o=
+github.com/containernetworking/plugins v0.8.6 h1:npZTLiMa4CRn6m5P9+1Dz4O1j0UeFbm8VYN6dlsw568=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9 h1:EGegltin15wEzCI/5jeHcxBKfwwIHYkBUvsYC3XP060=
github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9/go.mod h1:+2aNsVcd4pVzmVAbOfWN5X+0Lpz2rtICSGXbTSCzdBU=
-github.com/containers/common v0.10.0 h1:Km1foMJJBIxceA1/UCZcIuwf8sCF71sP5DwE6Oh1BEA=
github.com/containers/common v0.10.0/go.mod h1:6A/moCuQITXLqBe5A0WKKTcCfCmEQRbknI05HcPzOL0=
-github.com/containers/common v0.11.1 h1:i++kltFD92bKfDeE3B+Bpe5jYVTnAibmIUUUnXYKoPo=
-github.com/containers/common v0.11.1/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
-github.com/containers/conmon v2.0.14+incompatible h1:knU1O1QxXy5YxtjMQVKEyCajROaehizK9FHaICl+P5Y=
-github.com/containers/conmon v2.0.14+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.4.3 h1:zn2HR7uu4hpvT5QQHgjqonOzKDuM1I1UHUEmzZT5sbs=
+github.com/containers/common v0.11.2 h1:e4477fCE3qSA+Z2vT+uUMUTn8s8CyIM++qNm3PCSl68=
+github.com/containers/common v0.11.2/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
+github.com/containers/conmon v2.0.16+incompatible h1:QFOlb9Id4WoJ24BelCFWwDSPTquwKMp3L3g2iGmRTq4=
+github.com/containers/conmon v2.0.16+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
+github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
+github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/psgo v1.5.0 h1:uofUREsrm0Ls5K4tkEIFPqWSHKyg3Bvoqo/Q2eDmj8g=
github.com/containers/psgo v1.5.0/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
-github.com/containers/storage v1.18.2 h1:4cgFbrrgr9nR9xCeOmfpyxk1MtXYZGr7XGPJfAVkGmc=
github.com/containers/storage v1.18.2/go.mod h1:WTBMf+a9ZZ/LbmEVeLHH2TX4CikWbO1Bt+/m58ZHVPg=
-github.com/containers/storage v1.19.0 h1:bVIF5EglbT5PQnqcN7sE6VWqoQzlToqzjXdz+eNubQg=
github.com/containers/storage v1.19.0/go.mod h1:9Xc4rrTubn5hmtBfL+PSJH1XlfTQwR4VAG1NDUIpCts=
-github.com/containers/storage v1.19.1 h1:YKIzOO12iaD5Ra0PKFS6emcygbHLmwmQOCQRU/19YAQ=
github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
+github.com/containers/storage v1.20.1 h1:2XE4eRIqSa6YjhAZjNwIkIKE6+Miy+5WV8l1KzY2ZKk=
+github.com/containers/storage v1.20.1/go.mod h1:RoKzO8KSDogCT6c06rEbanZTcKYxshorB33JikEGc3A=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -103,8 +103,8 @@ github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b h1:SgS+WV10y2Bubuy2HquSBori6DXj9sqRN77Hgs5H7Qc=
-github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b/go.mod h1:ZOuIEOp/3MB1eCBWANnNxM3zUA3NWh76wSRCsnKAg2c=
+github.com/cri-o/ocicni v0.2.0 h1:p0kO+/fcLTO574CcDwzAosFdP2U+NEL+a4wph3Bt85k=
+github.com/cri-o/ocicni v0.2.0/go.mod h1:ZOuIEOp/3MB1eCBWANnNxM3zUA3NWh76wSRCsnKAg2c=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -138,7 +138,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -146,7 +145,6 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -196,17 +194,23 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -257,23 +261,19 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/juju/errors v0.0.0-20180806074554-22422dad46e1/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
+github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -317,6 +317,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -324,21 +326,25 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.12.2 h1:Ke9m3h2Hu0wsZ45yewCqhYr3Z+emcNTuLY2nMWCkrSI=
+github.com/onsi/ginkgo v1.12.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.0 h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=
github.com/onsi/gomega v1.10.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -350,9 +356,12 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
@@ -384,7 +393,6 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -393,6 +401,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -409,6 +418,8 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8q
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f h1:OtU/w6sBKmXYaw2KEODxjcYi3oPSyyslhgGFgIJVGAI=
github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
+github.com/seccomp/containers-golang v0.4.1 h1:6hsmsP8Y9T6PWKJELqAkRWkc6Te60+zK64avkjInd44=
+github.com/seccomp/containers-golang v0.4.1/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -418,7 +429,6 @@ github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -467,8 +477,9 @@ github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b h1:hdDRrn9OP/roL8a/e/5Z
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b/go.mod h1:YHaw8N660ESgMgLOZfLQqT1htFItynAUxMesFBho52s=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.0.3 h1:Ldt/azOkbThTk2loi6FrBd/3fhxGFQ24MxFAS88PoNY=
github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y=
+github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
+github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -503,8 +514,9 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -533,8 +545,9 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -562,7 +575,9 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -571,8 +586,10 @@ golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -605,18 +622,25 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -630,7 +654,6 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -640,20 +663,21 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
-k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
-k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
+k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0=
+k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
-k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
+k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
+k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
@@ -665,7 +689,7 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
@@ -679,7 +703,6 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:w
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
index 7e31c19c6..1d48f0996 100755
--- a/hack/get_ci_vm.sh
+++ b/hack/get_ci_vm.sh
@@ -67,13 +67,6 @@ delvm() {
cleanup
}
-image_hints() {
- _BIS=$(egrep -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' "$LIBPODROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]')
- egrep '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \
- "$LIBPODROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \
- sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u
-}
-
show_usage() {
echo -e "\n${RED}ERROR: $1${NOR}"
echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}"
@@ -90,17 +83,34 @@ show_usage() {
}
get_env_vars() {
- python -c '
-import yaml
+ # Deal with both YAML and embedded shell-like substitutions in values;
+ # if a substitution fails, fall back to printing the env. var as-is.
+ python3 -c '
+import yaml,re
env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
-keys=[k for k in env if "ENCRYPTED" not in str(env[k])]
+dollar_env_var=re.compile(r"\$(\w+)")
+dollarcurly_env_var=re.compile(r"\$\{(\w+)\}")
+class ReIterKey(dict):
+ def __missing__(self, key):
+ # Cirrus-CI provides some runtime-only env. vars. Avoid
+ # breaking this hack-script if/when any are present in YAML
+ return "${0}".format(key)
+rep=r"{\1}" # Convert env vars markup to -> str.format_map(re_iter_key) markup
+out=ReIterKey()
for k,v in env.items():
v=str(v)
- if "ENCRYPTED" not in v and "ADD_SECOND_PARTITION" not in v:
- print("{0}=\"{1}\"".format(k, v)),
+ if "ENCRYPTED" not in v:
+ out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, v))
+for k,v in out.items():
+ print("{0}=\"{1}\"".format(k, v.format_map(out)))
'
}
+image_hints() {
+ get_env_vars | fgrep '_CACHE_IMAGE_NAME' | awk -F "=" '{print $2}'
+}
+
+
parse_args(){
echo -e "$USAGE_WARNING"
diff --git a/hack/podman-registry b/hack/podman-registry
new file mode 100755
index 000000000..e7708ce6a
--- /dev/null
+++ b/hack/podman-registry
@@ -0,0 +1,231 @@
+#! /bin/bash
+#
+# podman-registry - start/stop/monitor a local instance of registry:2
+#
+ME=$(basename $0)
+
+###############################################################################
+# BEGIN defaults
+
+PODMAN_REGISTRY_IMAGE=docker.io/library/registry:2
+
+PODMAN_REGISTRY_USER=
+PODMAN_REGISTRY_PASS=
+PODMAN_REGISTRY_PORT=
+
+# Podman binary to run
+PODMAN=${PODMAN:-$(type -p podman)}
+
+# END defaults
+###############################################################################
+# BEGIN help messages
+
+missing=" argument is missing; see $ME --help for details"
+usage="Usage: $ME [options] [start|stop|ps|logs]
+
+$ME manages a local instance of a container registry.
+
+When called to start a registry, $ME will pull an image
+into a local temporary directory, create an htpasswd file, start the
+registry, and dump a series of environment variables to stdout:
+
+ \$ $ME start
+ PODMAN_REGISTRY_IMAGE=\"docker.io/library/registry:2\"
+ PODMAN_REGISTRY_PORT=\"5050\"
+ PODMAN_REGISTRY_USER=\"userZ3RZ\"
+ PODMAN_REGISTRY_PASS=\"T8JVJzKrcl4p6uT\"
+
+Expected usage, therefore, is something like this in a script:
+
+ eval \$($ME start)
+
+To stop the registry, you will need to know the port number:
+
+ $ME -P \$PODMAN_REGISTRY_PORT stop
+
+Override the default image, port, user, password with:
+
+ -i IMAGE registry image to pull (default: $PODMAN_REGISTRY_IMAGE)
+ -u USER registry user (default: random)
+ -p PASS password for registry user (default: random)
+ -P PORT port to bind to (on 127.0.0.1) (default: random, 5000-5999)
+
+Other options:
+
+ -h display usage message
+"
+
+die () {
+ echo "$ME: $*" >&2
+ exit 1
+}
+
+# END help messages
+###############################################################################
+# BEGIN option processing
+
+while getopts "i:u:p:P:hv" opt; do
+ case "$opt" in
+ i) PODMAN_REGISTRY_IMAGE=$OPTARG ;;
+ u) PODMAN_REGISTRY_USER=$OPTARG ;;
+ p) PODMAN_REGISTRY_PASS=$OPTARG ;;
+ P) PODMAN_REGISTRY_PORT=$OPTARG ;;
+ h) echo "$usage"; exit 0;;
+ v) verbose=1 ;;
+ \?) echo "Run '$ME -h' for help" >&2; exit 1;;
+ esac
+done
+shift $((OPTIND-1))
+
+# END option processing
+###############################################################################
+# BEGIN helper functions
+
+function random_string() {
+ local length=${1:-10}
+
+ head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
+}
+
+function podman() {
+ if [ -z "${PODMAN_REGISTRY_PORT}" ]; then
+ die "podman port undefined; please invoke me with -P PORT"
+ fi
+
+ if [ -z "${PODMAN_REGISTRY_WORKDIR}" ]; then
+ PODMAN_REGISTRY_WORKDIR=${TMPDIR:-/tmp}/podman-registry-${PODMAN_REGISTRY_PORT}
+ if [ ! -d ${PODMAN_REGISTRY_WORKDIR} ]; then
+ die "$ME: directory does not exist: ${PODMAN_REGISTRY_WORKDIR}"
+ fi
+ fi
+
+ ${PODMAN} --root ${PODMAN_REGISTRY_WORKDIR}/root \
+ --runroot ${PODMAN_REGISTRY_WORKDIR}/runroot \
+ "$@"
+}
+
+# END helper functions
+###############################################################################
+# BEGIN action processing
+
+function do_start() {
+ # If called without a port, assign a random one in the 5xxx range
+ if [ -z "${PODMAN_REGISTRY_PORT}" ]; then
+ for port in $(shuf -i 5000-5999);do
+ if ! { exec 3<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ PODMAN_REGISTRY_PORT=$port
+ break
+ fi
+ done
+ fi
+
+ PODMAN_REGISTRY_WORKDIR=${TMPDIR:-/tmp}/podman-registry-${PODMAN_REGISTRY_PORT}
+ if [ -d ${PODMAN_REGISTRY_WORKDIR} ]; then
+ die "$ME: directory exists: ${PODMAN_REGISTRY_WORKDIR} (another registry might already be running on this port)"
+ fi
+
+ # Randomly-generated username and password, if none given on command line
+ if [ -z "${PODMAN_REGISTRY_USER}" ]; then
+ PODMAN_REGISTRY_USER="user$(random_string 4)"
+ fi
+ if [ -z "${PODMAN_REGISTRY_PASS}" ]; then
+ PODMAN_REGISTRY_PASS=$(random_string 15)
+ fi
+
+ # Die on any error
+ set -e
+
+ mkdir -p ${PODMAN_REGISTRY_WORKDIR}
+
+ local AUTHDIR=${PODMAN_REGISTRY_WORKDIR}/auth
+ mkdir -p $AUTHDIR
+
+ # We have to be silent; our only output must be env. vars. Log output here.
+ local log=${PODMAN_REGISTRY_WORKDIR}/log
+ touch $log
+
+ # Pull registry image, but into a separate container storage
+ mkdir -p ${PODMAN_REGISTRY_WORKDIR}/root
+ mkdir -p ${PODMAN_REGISTRY_WORKDIR}/runroot
+
+ # Give it three tries, to compensate for flakes
+ podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log ||
+ podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log ||
+ podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log
+
+ # Registry image needs a cert. Self-signed is good enough.
+ local CERT=$AUTHDIR/domain.crt
+ # FIXME: if this fails, we fail silently! It'd be more helpful
+ # to say 'openssl failed' and cat the logfile
+ openssl req -newkey rsa:4096 -nodes -sha256 \
+ -keyout ${AUTHDIR}/domain.key -x509 -days 2 \
+ -out ${AUTHDIR}/domain.crt \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost" \
+ &>> $log
+
+ # Store credentials where container will see them
+ podman run --rm \
+ --entrypoint htpasswd ${PODMAN_REGISTRY_IMAGE} \
+ -Bbn ${PODMAN_REGISTRY_USER} ${PODMAN_REGISTRY_PASS} \
+ > $AUTHDIR/htpasswd
+
+ # In case someone needs to debug
+ echo "${PODMAN_REGISTRY_USER}:${PODMAN_REGISTRY_PASS}" \
+ > $AUTHDIR/htpasswd-plaintext
+
+ # Run the registry container.
+ podman run --quiet -d \
+ -p ${PODMAN_REGISTRY_PORT}:5000 \
+ --name registry \
+ -v $AUTHDIR:/auth:Z \
+ -e "REGISTRY_AUTH=htpasswd" \
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+ -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd" \
+ -e "REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt" \
+ -e "REGISTRY_HTTP_TLS_KEY=/auth/domain.key" \
+ registry:2 &>> $log
+
+ # Dump settings. Our caller will use these to access the registry.
+ for v in IMAGE PORT USER PASS; do
+ echo "PODMAN_REGISTRY_${v}=\"$(eval echo \$PODMAN_REGISTRY_${v})\""
+ done
+}
+
+
+function do_stop() {
+ podman stop registry
+ podman rm -f registry
+
+ rm -rf ${PODMAN_REGISTRY_WORKDIR}
+}
+
+
+function do_ps() {
+ podman ps -a
+}
+
+
+function do_logs() {
+ podman logs registry
+}
+
+# END action processing
+###############################################################################
+# BEGIN command-line processing
+
+# First command-line arg must be an action
+action=${1?ACTION$missing}
+shift
+
+case "$action" in
+ start) do_start ;;
+ stop) do_stop ;;
+ ps) do_ps ;;
+ logs) do_logs ;;
+ *) die "Unknown action '$action'; must be start / stop / ps / logs" ;;
+esac
+
+# END command-line processing
+###############################################################################
+
+exit 0
diff --git a/hack/podman-registry-go/registry.go b/hack/podman-registry-go/registry.go
new file mode 100644
index 000000000..a83304914
--- /dev/null
+++ b/hack/podman-registry-go/registry.go
@@ -0,0 +1,98 @@
+package registry
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/utils"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ imageKey = "PODMAN_REGISTRY_IMAGE"
+ userKey = "PODMAN_REGISTRY_USER"
+ passKey = "PODMAN_REGISTRY_PASS"
+ portKey = "PODMAN_REGISTRY_PORT"
+)
+
+var binary = "podman-registry"
+
+// Registry is a locally running registry.
+type Registry struct {
+ // Image - container image of the registry.
+ Image string
+ // User - the user to authenticate against the registry.
+ User string
+ // Password - the accompanying password for the user.
+ Password string
+ // Port - the port the registry is listening to on the host.
+ Port string
+ // running indicates if the registry is running.
+ running bool
+}
+
+// Start a new registry and return it along with its image, user, password, and port.
+func Start() (*Registry, error) {
+ // Start a registry.
+ out, err := utils.ExecCmd(binary, "start")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error running %q: %s", binary, out)
+ }
+
+ // Parse the output.
+ registry := Registry{}
+ for _, s := range strings.Split(out, "\n") {
+ if s == "" {
+ continue
+ }
+ spl := strings.Split(s, "=")
+ if len(spl) != 2 {
+ return nil, errors.Errorf("unexpected output format %q: want 'PODMAN_...=...'", s)
+ }
+ key := spl[0]
+ val := strings.TrimSuffix(strings.TrimPrefix(spl[1], "\""), "\"")
+ switch key {
+ case imageKey:
+ registry.Image = val
+ case userKey:
+ registry.User = val
+ case passKey:
+ registry.Password = val
+ case portKey:
+ registry.Port = val
+ default:
+ logrus.Errorf("unexpected podman-registry output: %q", s)
+ }
+ }
+
+ // Extra sanity check.
+ if registry.Image == "" {
+ return nil, errors.Errorf("unexpected output %q: %q missing", out, imageKey)
+ }
+ if registry.User == "" {
+ return nil, errors.Errorf("unexpected output %q: %q missing", out, userKey)
+ }
+ if registry.Password == "" {
+ return nil, errors.Errorf("unexpected output %q: %q missing", out, passKey)
+ }
+ if registry.Port == "" {
+ return nil, errors.Errorf("unexpected output %q: %q missing", out, portKey)
+ }
+
+ registry.running = true
+
+ return &registry, nil
+}
+
+// Stop the registry.
+func (r *Registry) Stop() error {
+ // Stop a registry.
+ if !r.running {
+ return nil
+ }
+ if _, err := utils.ExecCmd(binary, "-P", r.Port, "stop"); err != nil {
+ return errors.Wrapf(err, "error stopping registry (%v) with %q", *r, binary)
+ }
+ r.running = false
+ return nil
+}
diff --git a/hack/podman-registry-go/registry_test.go b/hack/podman-registry-go/registry_test.go
new file mode 100644
index 000000000..4e4bf5fe2
--- /dev/null
+++ b/hack/podman-registry-go/registry_test.go
@@ -0,0 +1,40 @@
+package registry
+
+import (
+ "testing"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStartAndStopMultipleRegistries(t *testing.T) {
+ binary = "../podman-registry"
+
+ registries := []*Registry{}
+
+ // Start registries.
+ var errors *multierror.Error
+ for i := 0; i < 3; i++ {
+ reg, err := Start()
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ continue
+ }
+ assert.True(t, len(reg.Image) > 0)
+ assert.True(t, len(reg.User) > 0)
+ assert.True(t, len(reg.Password) > 0)
+ assert.True(t, len(reg.Port) > 0)
+ registries = append(registries, reg)
+ }
+
+ // Stop registries.
+ for _, reg := range registries {
+ // Make sure we can stop it properly.
+ errors = multierror.Append(errors, reg.Stop())
+ // Stopping an already stopped registry is fine as well.
+ errors = multierror.Append(errors, reg.Stop())
+ }
+
+ require.NoError(t, errors.ErrorOrNil())
+}
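
The test above starts and stops several registries through the new Go wrapper. A rough sketch of how another test suite could consume the package, assuming the import path follows the repository layout and the podman-registry script is reachable on PATH (or via the package-level binary variable):

    package example

    import (
        "testing"

        registry "github.com/containers/libpod/hack/podman-registry-go"
    )

    func TestWithLocalRegistry(t *testing.T) {
        reg, err := registry.Start()
        if err != nil {
            t.Fatalf("starting registry: %v", err)
        }
        // Always tear the registry down, even on test failure; Stop is
        // idempotent, so a double call is harmless.
        defer func() {
            if err := reg.Stop(); err != nil {
                t.Errorf("stopping registry: %v", err)
            }
        }()

        // reg.User, reg.Password and reg.Port can now be fed to
        // podman login / push against 127.0.0.1:<Port>.
        t.Logf("registry at 127.0.0.1:%s (user %s)", reg.Port, reg.User)
    }
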
diff --git a/libpod.conf b/libpod.conf
deleted file mode 100644
index 1bc31eb4c..000000000
--- a/libpod.conf
+++ /dev/null
@@ -1,181 +0,0 @@
-# libpod.conf is the default configuration file for all tools using libpod to
-# manage containers
-
-# Default transport method for pulling and pushing for images
-image_default_transport = "docker://"
-
-# Paths to look for the conmon container manager binary.
-# If the paths are empty or no valid path was found, then the `$PATH`
-# environment variable will be used as the fallback.
-conmon_path = [
- "/usr/libexec/podman/conmon",
- "/usr/local/libexec/podman/conmon",
- "/usr/local/lib/podman/conmon",
- "/usr/bin/conmon",
- "/usr/sbin/conmon",
- "/usr/local/bin/conmon",
- "/usr/local/sbin/conmon",
- "/run/current-system/sw/bin/conmon",
-]
-
-# Environment variables to pass into conmon
-conmon_env_vars = [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-]
-
-# CGroup Manager - valid values are "systemd" and "cgroupfs"
-cgroup_manager = "systemd"
-
-# Container init binary
-#init_path = "/usr/libexec/podman/catatonit"
-
-# Directory for persistent libpod files (database, etc)
-# By default, this will be configured relative to where containers/storage
-# stores containers
-# Uncomment to change location from this default
-#static_dir = "/var/lib/containers/storage/libpod"
-
-# Directory for temporary files. Must be tmpfs (wiped after reboot)
-tmp_dir = "/var/run/libpod"
-
-# Maximum size of log files (in bytes)
-# -1 is unlimited
-max_log_size = -1
-
-# Whether to use chroot instead of pivot_root in the runtime
-no_pivot_root = false
-
-# Directory containing CNI plugin configuration files
-cni_config_dir = "/etc/cni/net.d/"
-
-# Directories where the CNI plugin binaries may be located
-cni_plugin_dir = [
- "/usr/libexec/cni",
- "/usr/lib/cni",
- "/usr/local/lib/cni",
- "/opt/cni/bin"
-]
-
-# Default CNI network for libpod.
-# If multiple CNI network configs are present, libpod will use the network with
-# the name given here for containers unless explicitly overridden.
-# The default here is set to the name we set in the
-# 87-podman-bridge.conflist included in the repository.
-# Not setting this, or setting it to the empty string, will use normal CNI
-# precedence rules for selecting between multiple networks.
-cni_default_network = "podman"
-
-# Default libpod namespace
-# If libpod is joined to a namespace, it will see only containers and pods
-# that were created in the same namespace, and will create new containers and
-# pods in that namespace.
-# The default namespace is "", which corresponds to no namespace. When no
-# namespace is set, all containers and pods are visible.
-#namespace = ""
-
-# Default infra (pause) image name for pod infra containers
-infra_image = "k8s.gcr.io/pause:3.2"
-
-# Default command to run the infra container
-infra_command = "/pause"
-
-# Determines whether libpod will reserve ports on the host when they are
-# forwarded to containers. When enabled, when ports are forwarded to containers,
-# they are held open by conmon as long as the container is running, ensuring that
-# they cannot be reused by other programs on the host. However, this can cause
-# significant memory usage if a container has many ports forwarded to it.
-# Disabling this can save memory.
-#enable_port_reservation = true
-
-# Default libpod support for container labeling
-# label=true
-
-# The locking mechanism to use
-lock_type = "shm"
-
-# Number of locks available for containers and pods.
-# If this is changed, a lock renumber must be performed (e.g. with the
-# 'podman system renumber' command).
-num_locks = 2048
-
-# Directory for libpod named volumes.
-# By default, this will be configured relative to where containers/storage
-# stores containers.
-# Uncomment to change location from this default.
-#volume_path = "/var/lib/containers/storage/volumes"
-
-# Selects which logging mechanism to use for Podman events. Valid values
-# are `journald` or `file`.
-# events_logger = "journald"
-
-# Specify the keys sequence used to detach a container.
-# Format is a single character [a-Z] or a comma separated sequence of
-# `ctrl-<value>`, where `<value>` is one of:
-# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
-#
-# detach_keys = "ctrl-p,ctrl-q"
-
-# Default OCI runtime
-runtime = "runc"
-
-# List of the OCI runtimes that support --format=json. When json is supported
-# libpod will use it for reporting nicer errors.
-runtime_supports_json = ["crun", "runc"]
-
-# List of all the OCI runtimes that support --cgroup-manager=disable to disable
-# creation of CGroups for containers.
-runtime_supports_nocgroups = ["crun"]
-
-# Paths to look for a valid OCI runtime (runc, runv, etc)
-# If the paths are empty or no valid path was found, then the `$PATH`
-# environment variable will be used as the fallback.
-[runtimes]
-runc = [
- "/usr/bin/runc",
- "/usr/sbin/runc",
- "/usr/local/bin/runc",
- "/usr/local/sbin/runc",
- "/sbin/runc",
- "/bin/runc",
- "/usr/lib/cri-o-runc/sbin/runc",
- "/run/current-system/sw/bin/runc",
-]
-
-crun = [
- "/usr/bin/crun",
- "/usr/sbin/crun",
- "/usr/local/bin/crun",
- "/usr/local/sbin/crun",
- "/sbin/crun",
- "/bin/crun",
- "/run/current-system/sw/bin/crun",
-]
-
-# Kata Containers is an OCI runtime, where containers are run inside lightweight
-# Virtual Machines (VMs). Kata provides additional isolation towards the host,
-# minimizing the host attack surface and mitigating the consequences of
-# containers breakout.
-# Please notes that Kata does not support rootless podman yet, but we can leave
-# the paths below blank to let them be discovered by the $PATH environment
-# variable.
-
-# Kata Containers with the default configured VMM
-kata-runtime = [
- "/usr/bin/kata-runtime",
-]
-
-# Kata Containers with the QEMU VMM
-kata-qemu = [
- "/usr/bin/kata-qemu",
-]
-
-# Kata Containers with the Firecracker VMM
-kata-fc = [
- "/usr/bin/kata-fc",
-]
-
-# The [runtimes] table MUST be the last thing in this file.
-# (Unless another table is added)
-# TOML does not provide a way to end a table other than a further table being
-# defined, so every key hereafter will be part of [runtimes] and not the main
-# config.
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 33ff0720f..21d55bf77 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -695,7 +695,10 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
}
- ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID())
+ }
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
@@ -890,6 +893,9 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if ctrDepsBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID())
+ }
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index b31079b26..d366ffb84 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -285,6 +285,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
+ logSize := 0
if streamLogs {
// Get all logs for the container
logChan := make(chan *logs.LogLine)
@@ -302,7 +303,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
device := logLine.Device
var header []byte
headerLen := uint32(len(logLine.Msg))
-
+ logSize += len(logLine.Msg)
switch strings.ToLower(device) {
case "stdin":
header = makeHTTPAttachHeader(0, headerLen)
@@ -341,7 +342,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
if err := c.ReadLog(logOpts, logChan); err != nil {
return err
}
- logrus.Debugf("Done reading logs for container %s", c.ID())
+ logrus.Debugf("Done reading logs for container %s, %d bytes", c.ID(), logSize)
if err := <-errChan; err != nil {
return err
}
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index c1ce8b724..f2943b73c 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -1,7 +1,9 @@
package libpod
import (
+ "bufio"
"io/ioutil"
+ "net"
"os"
"path/filepath"
"strconv"
@@ -60,6 +62,13 @@ type ExecConfig struct {
// given is the number that will be passed into the exec session,
// starting at 3.
PreserveFDs uint `json:"preserveFds,omitempty"`
+ // ExitCommand is the exec session's exit command.
+ // This command will be executed when the exec session exits.
+ // If unset, no command will be executed.
+ // Two arguments will be appended to the exit command by Libpod:
+ // The ID of the exec session, and the ID of the container the exec
+ // session is a part of (in that order).
+ ExitCommand []string `json:"exitCommand,omitempty"`
}
// ExecSession contains information on a single exec session attached to a given
@@ -102,7 +111,7 @@ func (e *ExecSession) Inspect() (*define.InspectExecSession, error) {
}
output := new(define.InspectExecSession)
- output.CanRemove = e.State != define.ExecStateRunning
+ output.CanRemove = e.State == define.ExecStateStopped
output.ContainerID = e.ContainerId
if e.Config.DetachKeys != nil {
output.DetachKeys = *e.Config.DetachKeys
@@ -156,9 +165,6 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
if len(config.Command) == 0 {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
}
- if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) {
- return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal")
- }
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
@@ -192,6 +198,10 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
}
+ if len(session.Config.ExitCommand) > 0 {
+ session.Config.ExitCommand = append(session.Config.ExitCommand, []string{session.ID(), c.ID()}...)
+ }
+
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
@@ -211,11 +221,52 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
}
// ExecStart starts an exec session in the container, but does not attach to it.
-// Returns immediately upon starting the exec session.
+// Returns immediately upon starting the exec session, unlike the attach
+// variants, which only return when the exec session exits.
func (c *Container) ExecStart(sessionID string) error {
- // Will be implemented in part 2, migrating Start and implementing
- // detached Start.
- return define.ErrNotImplemented
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ }
+
+ // Verify that we are in a good state to continue
+ if !c.ensureState(define.ContainerStateRunning) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ }
+
+ session, ok := c.state.ExecSessions[sessionID]
+ if !ok {
+ return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ }
+
+ if session.State != define.ExecStateCreated {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ }
+
+ logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
+
+ opts, err := prepareForExec(c, session)
+ if err != nil {
+ return err
+ }
+
+ pid, err := c.ociRuntime.ExecContainerDetached(c, session.ID(), opts, session.Config.AttachStdin)
+ if err != nil {
+ return err
+ }
+
+ c.newContainerEvent(events.Exec)
+ logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
+
+ // Update and save session to reflect PID/running
+ session.PID = pid
+ session.State = define.ExecStateRunning
+
+ return c.save()
}
// ExecStartAndAttach starts and attaches to an exec session in a container.
@@ -247,34 +298,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
- // TODO: check logic here - should we set Privileged if the container is
- // privileged?
- var capList []string
- if session.Config.Privileged || c.config.Privileged {
- capList = capabilities.AllCapabilities()
- }
-
- user := c.config.User
- if session.Config.User != "" {
- user = session.Config.User
- }
-
- if err := c.createExecBundle(session.ID()); err != nil {
+ opts, err := prepareForExec(c, session)
+ if err != nil {
return err
}
- opts := new(ExecOptions)
- opts.Cmd = session.Config.Command
- opts.CapAdd = capList
- opts.Env = session.Config.Environment
- opts.Terminal = session.Config.Terminal
- opts.Cwd = session.Config.WorkDir
- opts.User = user
- opts.Streams = streams
- opts.PreserveFDs = session.Config.PreserveFDs
- opts.DetachKeys = session.Config.DetachKeys
-
- pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts)
+ pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts, streams)
if err != nil {
return err
}
@@ -318,28 +347,124 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
c.lock.Lock()
}
- // Sync the container to pick up state changes
- if err := c.syncContainer(); err != nil {
+ if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID())
+ lastErr = err
+ }
+
+ // Clean up after ourselves
+ if err := c.cleanupExecBundle(session.ID()); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = err
+ }
+
+ return lastErr
+}
+
+// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
+func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) {
+ // TODO: How do we combine streams with the default streams set in the exec session?
+
+ // The flow here is somewhat strange, because we need to determine if
+ // there's a terminal ASAP (for error handling).
+ // Until we know, assume it's true (don't add standard stream headers).
+ // Add a defer to ensure our invariant (HTTP session is closed) is
+ // maintained.
+ isTerminal := true
+ defer func() {
+ hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
+ }()
+
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
}
- // Update status
- // Since we did a syncContainer, the old session has been overwritten.
- // Grab a fresh one from the database.
- session, ok = c.state.ExecSessions[sessionID]
+ session, ok := c.state.ExecSessions[sessionID]
if !ok {
- // Exec session already removed.
- logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID)
- return nil
+ return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
- session.State = define.ExecStateStopped
- session.ExitCode = exitCode
- session.PID = 0
+ // We can now finally get the real value of isTerminal.
+ isTerminal = session.Config.Terminal
+
+ // Verify that we are in a good state to continue
+ if !c.ensureState(define.ContainerStateRunning) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ }
+
+ if session.State != define.ExecStateCreated {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ }
+
+ logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
+
+ execOpts, err := prepareForExec(c, session)
+ if err != nil {
+ return err
+ }
+
+ if streams == nil {
+ streams = new(HTTPAttachStreams)
+ streams.Stdin = session.Config.AttachStdin
+ streams.Stdout = session.Config.AttachStdout
+ streams.Stderr = session.Config.AttachStderr
+ }
+
+ pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, httpCon, httpBuf, streams, cancel)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Investigate whether more of this can be made common with
+ // ExecStartAndAttach
+
+ c.newContainerEvent(events.Exec)
+ logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
+
+ var lastErr error
+
+ session.PID = pid
+ session.State = define.ExecStateRunning
if err := c.save(); err != nil {
+ lastErr = err
+ }
+
+ // Unlock so other processes can use the container
+ if !c.batched {
+ c.lock.Unlock()
+ }
+
+ tmpErr := <-attachChan
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = tmpErr
+
+ exitCode, err := c.readExecExitCode(session.ID())
+ if err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = err
+ }
+
+ logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+
+ // Lock again
+ if !c.batched {
+ c.lock.Lock()
+ }
+
+ if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
@@ -357,12 +482,6 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
return lastErr
}
-// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
-func (c *Container) ExecHTTPStartAndAttach(sessionID string) error {
- // Will be implemented in part 2, migrating Start.
- return define.ErrNotImplemented
-}
-
// ExecStop stops an exec session in the container.
// If a timeout is provided, it will be used; otherwise, the timeout will
// default to the stop timeout of the container.
@@ -444,7 +563,27 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if session.State == define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ // Check if the exec session is still running.
+ alive, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
+ if err != nil {
+ return err
+ }
+
+ if alive {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ }
+
+ exitCode, err := c.readExecExitCode(session.ID())
+ if err != nil {
+ return err
+ }
+ session.ExitCode = exitCode
+ session.PID = 0
+ session.State = define.ExecStateStopped
+
+ if err := c.save(); err != nil {
+ return err
+ }
}
logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID())
@@ -474,11 +613,11 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// Update status of exec session if running, so we can check if it
// stopped in the meantime.
if session.State == define.ExecStateRunning {
- stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
+ running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil {
return err
}
- if stopped {
+ if !running {
session.State = define.ExecStateStopped
// TODO: should we retrieve exit code here?
// TODO: Might be worth saving state here.
@@ -733,13 +872,6 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
continue
}
if !alive {
- if err := c.cleanupExecBundle(id); err != nil {
- if lastErr != nil {
- logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
- }
- lastErr = err
- }
-
_, isLegacy := c.state.LegacyExecSessions[id]
if isLegacy {
delete(c.state.LegacyExecSessions, id)
@@ -759,6 +891,12 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
needSave = true
}
+ if err := c.cleanupExecBundle(id); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
+ }
+ lastErr = err
+ }
} else {
activeSessions = append(activeSessions, id)
}
@@ -779,6 +917,8 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
func (c *Container) removeAllExecSessions() error {
knownSessions := c.getKnownExecSessions()
+ logrus.Debugf("Removing all exec sessions for container %s", c.ID())
+
var lastErr error
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
@@ -814,3 +954,68 @@ func (c *Container) removeAllExecSessions() error {
return lastErr
}
+
+// Make an ExecOptions struct to start the OCI runtime and prepare its exec
+// bundle.
+func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
+ // TODO: check logic here - should we set Privileged if the container is
+ // privileged?
+ var capList []string
+ if session.Config.Privileged || c.config.Privileged {
+ capList = capabilities.AllCapabilities()
+ }
+
+ user := c.config.User
+ if session.Config.User != "" {
+ user = session.Config.User
+ }
+
+ if err := c.createExecBundle(session.ID()); err != nil {
+ return nil, err
+ }
+
+ opts := new(ExecOptions)
+ opts.Cmd = session.Config.Command
+ opts.CapAdd = capList
+ opts.Env = session.Config.Environment
+ opts.Terminal = session.Config.Terminal
+ opts.Cwd = session.Config.WorkDir
+ opts.User = user
+ opts.PreserveFDs = session.Config.PreserveFDs
+ opts.DetachKeys = session.Config.DetachKeys
+ opts.ExitCommand = session.Config.ExitCommand
+
+ return opts, nil
+}
+
+// Write an exec session's exit code to the database
+func writeExecExitCode(c *Container, sessionID string, exitCode int) error {
+ // We can't reuse the old exec session (things may have changed from
+ // under us; the container was unlocked).
+ // So re-sync and get a fresh copy.
+ // If we can't do this, there's no point in continuing; any attempt to save
+ // would write garbage to the DB.
+ if err := c.syncContainer(); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ // Container's entirely removed. We can't save status,
+ // but the container's entirely removed, so we don't
+ // need to. Exit without error.
+ return nil
+ }
+ return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ }
+
+ session, ok := c.state.ExecSessions[sessionID]
+ if !ok {
+ // Exec session already removed.
+ logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID)
+ return nil
+ }
+
+ session.State = define.ExecStateStopped
+ session.ExitCode = exitCode
+ session.PID = 0
+
+ // Finally, save our changes.
+ return c.save()
+}
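
Taken together, the container_exec.go hunks make ExecStart a real detached start (it previously returned ErrNotImplemented) and have ExecCreate append the session and container IDs to any configured ExitCommand. A rough sketch of the resulting caller-side flow, using only the exported names visible in this diff; the exit-command handler path is a placeholder, not podman's actual cleanup command:

    package example

    import "github.com/containers/libpod/libpod"

    // runDetachedExec sketches the ExecCreate -> ExecStart flow enabled above.
    // ctr must already be a running container; once the exec process exits,
    // the caller is expected to invoke ctr.ExecCleanup(sessionID).
    func runDetachedExec(ctr *libpod.Container) (string, error) {
        cfg := new(libpod.ExecConfig)
        cfg.Command = []string{"sh", "-c", "echo hello from exec"}
        // Libpod appends the exec session ID and the container ID (in that
        // order) before running this; the handler path is hypothetical.
        cfg.ExitCommand = []string{"/usr/local/bin/my-exec-cleanup"}

        sessionID, err := ctr.ExecCreate(cfg)
        if err != nil {
            return "", err
        }

        // Detached start: returns as soon as the exec session is running.
        if err := ctr.ExecStart(sessionID); err != nil {
            return "", err
        }
        return sessionID, nil
    }
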
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 3fcf687ec..43e873bd6 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1011,6 +1011,14 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
logrus.Debugf("Created container %s in OCI runtime", c.ID())
+ // Remove any exec sessions leftover from a potential prior run.
+ if len(c.state.ExecSessions) > 0 {
+ if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
+ logrus.Errorf("Error removing container %s exec sessions from DB: %v", c.ID(), err)
+ }
+ c.state.ExecSessions = make(map[string]*ExecSession)
+ }
+
c.state.ExitCode = 0
c.state.Exited = false
c.state.State = define.ContainerStateCreated
@@ -1161,7 +1169,7 @@ func (c *Container) start() error {
c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
- if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
+ if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil {
logrus.Error(err)
}
if err := c.startTimer(); err != nil {
@@ -1562,21 +1570,24 @@ func (c *Container) cleanup(ctx context.Context) error {
lastError = errors.Wrapf(err, "error removing container %s network", c.ID())
}
- // Unmount storage
- if err := c.cleanupStorage(); err != nil {
+ // Remove the container from the runtime, if necessary.
+ // Do this *before* unmounting storage - some runtimes (e.g. Kata)
+ // apparently object to having storage removed while the container still
+ // exists.
+ if err := c.cleanupRuntime(ctx); err != nil {
if lastError != nil {
- logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err)
+ logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err)
} else {
- lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
+ lastError = err
}
}
- // Remove the container from the runtime, if necessary
- if err := c.cleanupRuntime(ctx); err != nil {
+ // Unmount storage
+ if err := c.cleanupStorage(); err != nil {
if lastError != nil {
- logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err)
+ logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err)
} else {
- lastError = err
+ lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
}
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 8ee0fb456..2bd6099f0 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1236,7 +1236,7 @@ func (c *Container) makeBindMounts() error {
}
// Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false)
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.Mountpoint, c.RootUID(), c.RootGID(), rootless.IsRootless(), false)
for _, mount := range secretMounts {
if _, ok := c.state.BindMounts[mount.Destination]; !ok {
c.state.BindMounts[mount.Destination] = mount.Source
diff --git a/libpod/container_internal_test.go b/libpod/container_internal_test.go
index 5428504ef..fdf7c2e20 100644
--- a/libpod/container_internal_test.go
+++ b/libpod/container_internal_test.go
@@ -60,7 +60,7 @@ func TestPostDeleteHooks(t *testing.T) {
t.Fatal(err)
}
- stateRegexp := `{"ociVersion":"1\.0\.1-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}`
+ stateRegexp := `{"ociVersion":"1\.0\.2-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}`
for _, p := range []string{statePath, copyPath} {
path := p
t.Run(path, func(t *testing.T) {
diff --git a/libpod/define/config.go b/libpod/define/config.go
index 692eafb04..5ca4da4af 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -3,6 +3,9 @@ package define
import (
"bufio"
"io"
+ "regexp"
+
+ "github.com/pkg/errors"
)
var (
@@ -10,6 +13,13 @@ var (
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+ // NameRegex is a regular expression to validate container/pod names.
+ // This must NOT be changed from outside of Libpod. It should be a
+ // constant, but Go won't let us do that.
+ NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
+ // RegexError is returned when a container/pod name does not match NameRegex.
+ RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
)
const (
diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go
new file mode 100644
index 000000000..4114262b6
--- /dev/null
+++ b/libpod/define/healthchecks.go
@@ -0,0 +1,36 @@
+package define
+
+const (
+ // HealthCheckHealthy describes a healthy container
+ HealthCheckHealthy string = "healthy"
+ // HealthCheckUnhealthy describes an unhealthy container
+ HealthCheckUnhealthy string = "unhealthy"
+ // HealthCheckStarting describes the time between when the container starts
+ // and the start-period (time allowed for the container to start and application
+ // to be running) expires.
+ HealthCheckStarting string = "starting"
+)
+
+// HealthCheckStatus represents the current state of a container
+type HealthCheckStatus int
+
+const (
+ // HealthCheckSuccess means the health worked
+ HealthCheckSuccess HealthCheckStatus = iota
+ // HealthCheckFailure means the health ran and failed
+ HealthCheckFailure HealthCheckStatus = iota
+ // HealthCheckContainerStopped means the health check cannot
+ // be run because the container is stopped
+ HealthCheckContainerStopped HealthCheckStatus = iota
+ // HealthCheckContainerNotFound means the container could
+ // not be found in local store
+ HealthCheckContainerNotFound HealthCheckStatus = iota
+ // HealthCheckNotDefined means the container has no health
+ // check defined in it
+ HealthCheckNotDefined HealthCheckStatus = iota
+ // HealthCheckInternalError means something failed obtaining or running
+ // a given health check
+ HealthCheckInternalError HealthCheckStatus = iota
+ // HealthCheckDefined means the healthcheck was found on the container
+ HealthCheckDefined HealthCheckStatus = iota
+)
diff --git a/libpod/define/version.go b/libpod/define/version.go
index 954cd00f1..3eb016264 100644
--- a/libpod/define/version.go
+++ b/libpod/define/version.go
@@ -3,6 +3,7 @@ package define
import (
"runtime"
"strconv"
+ "time"
podmanVersion "github.com/containers/libpod/version"
)
@@ -19,12 +20,13 @@ var (
// Version is an output struct for varlink
type Version struct {
- RemoteAPIVersion int64
- Version string
- GoVersion string
- GitCommit string
- Built int64
- OsArch string
+ APIVersion int64
+ Version string
+ GoVersion string
+ GitCommit string
+ BuiltTime string
+ Built int64
+ OsArch string
}
// GetVersion returns a VersionOutput struct for varlink and podman
@@ -40,11 +42,12 @@ func GetVersion() (Version, error) {
}
}
return Version{
- RemoteAPIVersion: podmanVersion.RemoteAPIVersion,
- Version: podmanVersion.Version,
- GoVersion: runtime.Version(),
- GitCommit: gitCommit,
- Built: buildTime,
- OsArch: runtime.GOOS + "/" + runtime.GOARCH,
+ APIVersion: podmanVersion.APIVersion,
+ Version: podmanVersion.Version,
+ GoVersion: runtime.Version(),
+ GitCommit: gitCommit,
+ BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC),
+ Built: buildTime,
+ OsArch: runtime.GOOS + "/" + runtime.GOARCH,
}, nil
}
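
The Version struct now carries both the raw Built timestamp and a preformatted BuiltTime, and RemoteAPIVersion becomes APIVersion. A minimal sketch of printing the new fields through define.GetVersion() (field usage only; the output layout is an assumption):

    package example

    import (
        "fmt"

        "github.com/containers/libpod/libpod/define"
    )

    // printVersion prints the fields added/renamed above. BuiltTime is already
    // human readable (time.ANSIC); Built keeps the raw Unix timestamp.
    func printVersion() error {
        v, err := define.GetVersion()
        if err != nil {
            return err
        }
        fmt.Printf("Version:     %s\n", v.Version)
        fmt.Printf("API Version: %d\n", v.APIVersion)
        fmt.Printf("Go Version:  %s\n", v.GoVersion)
        fmt.Printf("Built:       %s\n", v.BuiltTime)
        fmt.Printf("OS/Arch:     %s\n", v.OsArch)
        return nil
    }
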
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index aec5fa4e0..0006b7c06 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -14,43 +14,12 @@ import (
"github.com/sirupsen/logrus"
)
-// HealthCheckStatus represents the current state of a container
-type HealthCheckStatus int
-
const (
- // HealthCheckSuccess means the health worked
- HealthCheckSuccess HealthCheckStatus = iota
- // HealthCheckFailure means the health ran and failed
- HealthCheckFailure HealthCheckStatus = iota
- // HealthCheckContainerStopped means the health check cannot
- // be run because the container is stopped
- HealthCheckContainerStopped HealthCheckStatus = iota
- // HealthCheckContainerNotFound means the container could
- // not be found in local store
- HealthCheckContainerNotFound HealthCheckStatus = iota
- // HealthCheckNotDefined means the container has no health
- // check defined in it
- HealthCheckNotDefined HealthCheckStatus = iota
- // HealthCheckInternalError means some something failed obtaining or running
- // a given health check
- HealthCheckInternalError HealthCheckStatus = iota
- // HealthCheckDefined means the healthcheck was found on the container
- HealthCheckDefined HealthCheckStatus = iota
-
// MaxHealthCheckNumberLogs is the maximum number of attempts we keep
// in the healthcheck history file
MaxHealthCheckNumberLogs int = 5
// MaxHealthCheckLogLength in characters
MaxHealthCheckLogLength = 500
-
- // HealthCheckHealthy describes a healthy container
- HealthCheckHealthy string = "healthy"
- // HealthCheckUnhealthy describes an unhealthy container
- HealthCheckUnhealthy string = "unhealthy"
- // HealthCheckStarting describes the time between when the container starts
- // and the start-period (time allowed for the container to start and application
- // to be running) expires.
- HealthCheckStarting string = "starting"
)
// hcWriteCloser allows us to use bufio as a WriteCloser
@@ -65,10 +34,10 @@ func (hcwc hcWriteCloser) Close() error {
// HealthCheck verifies the state and validity of the healthcheck configuration
// on the container and then executes the healthcheck
-func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
+func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
- return HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
+ return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
@@ -78,7 +47,7 @@ func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
}
// runHealthCheck runs the health check as defined by the container
-func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
+func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
var (
newCommand []string
returnCode int
@@ -87,11 +56,11 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
case "", "NONE":
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
case "CMD":
newCommand = hcCommand[1:]
case "CMD-SHELL":
@@ -102,7 +71,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
captureBuffer := bufio.NewWriter(&capture)
hcw := hcWriteCloser{
@@ -120,13 +89,13 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
- hcResult := HealthCheckSuccess
+ hcResult := define.HealthCheckSuccess
config := new(ExecConfig)
config.Command = newCommand
_, hcErr := c.Exec(config, streams, nil)
if hcErr != nil {
errCause := errors.Cause(hcErr)
- hcResult = HealthCheckFailure
+ hcResult = define.HealthCheckFailure
if errCause == define.ErrOCIRuntimeNotFound ||
errCause == define.ErrOCIRuntimePermissionDenied ||
errCause == define.ErrOCIRuntime {
@@ -154,7 +123,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
- hcResult = HealthCheckFailure
+ hcResult = define.HealthCheckFailure
hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
@@ -164,18 +133,18 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
return hcResult, hcErr
}
-func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
+func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
cstate, err := c.State()
if err != nil {
- return HealthCheckInternalError, err
+ return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
- return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
+ return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
- return HealthCheckDefined, nil
+ return define.HealthCheckDefined, nil
}
func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.HealthCheckLog {
@@ -210,18 +179,18 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio
}
if hcl.ExitCode == 0 {
// set status to healthy, reset failing state to 0
- healthCheck.Status = HealthCheckHealthy
+ healthCheck.Status = define.HealthCheckHealthy
healthCheck.FailingStreak = 0
} else {
if len(healthCheck.Status) < 1 {
- healthCheck.Status = HealthCheckHealthy
+ healthCheck.Status = define.HealthCheckHealthy
}
if !inStartPeriod {
// increment failing streak
healthCheck.FailingStreak += 1
// if failing streak > retries, then status to unhealthy
if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
- healthCheck.Status = HealthCheckUnhealthy
+ healthCheck.Status = define.HealthCheckUnhealthy
}
}
}
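
With the constants relocated to the define package, Runtime.HealthCheck now returns a define.HealthCheckStatus. A hedged sketch of mapping that status to a shell-style exit code in a CLI-like caller; the numeric mapping is chosen for the example and is not necessarily the one podman uses:

    package example

    import (
        "github.com/containers/libpod/libpod"
        "github.com/containers/libpod/libpod/define"
    )

    // healthCheckExitCode converts the status returned by Runtime.HealthCheck
    // into a shell-style exit code. The mapping is illustrative only.
    func healthCheckExitCode(rt *libpod.Runtime, name string) (int, error) {
        status, err := rt.HealthCheck(name)
        switch status {
        case define.HealthCheckSuccess:
            return 0, nil
        case define.HealthCheckFailure:
            return 1, nil
        default:
            // Stopped, not found, undefined healthcheck, or internal error.
            return 125, err
        }
    }
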
diff --git a/libpod/image/filters.go b/libpod/image/filters.go
index 8ca3526a0..747eba165 100644
--- a/libpod/image/filters.go
+++ b/libpod/image/filters.go
@@ -170,8 +170,7 @@ func (ir *Runtime) createFilterFuncs(filters []string, img *Image) ([]ResultFilt
labelFilter := strings.Join(splitFilter[1:], "=")
filterFuncs = append(filterFuncs, LabelFilter(ctx, labelFilter))
case "reference":
- referenceFilter := strings.Join(splitFilter[1:], "=")
- filterFuncs = append(filterFuncs, ReferenceFilter(ctx, referenceFilter))
+ filterFuncs = append(filterFuncs, ReferenceFilter(ctx, splitFilter[1]))
case "id":
filterFuncs = append(filterFuncs, IdFilter(splitFilter[1]))
default:
diff --git a/libpod/oci.go b/libpod/oci.go
index 9991c5625..7c5218319 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -61,17 +61,30 @@ type OCIRuntime interface {
// the attach session to be terminated if provided via the STDIN
// channel. If they are not provided, the default detach keys will be
// used instead. Detach keys of "" will disable detaching via keyboard.
- // The streams parameter may be passed for containers that did not
- // create a terminal and will determine which streams to forward to the
+ // The streams parameter will determine which streams to forward to the
// client.
HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error
// AttachResize resizes the terminal in use by the given container.
AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
// ExecContainer executes a command in a running container.
- // Returns an int (exit code), error channel (errors from attach), and
- // error (errors that occurred attempting to start the exec session).
- ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error)
+ // Returns an int (PID of exec session), error channel (errors from
+ // attach), and error (errors that occurred attempting to start the exec
+ // session). This returns once the exec session is running - not once it
+ // has completed, as one might expect. The attach session will remain
+ // running, in a goroutine that will return via the chan error in the
+ // return signature.
+ ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error)
+ // ExecContainerHTTP executes a command in a running container and
+ // attaches its standard streams to a provided hijacked HTTP session.
+ // Maintains the same invariants as ExecContainer (returns on session
+ // start, with a goroutine running in the background to handle attach).
+ // The HTTP attach itself maintains the same invariants as HTTPAttach.
+ ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error)
+ // ExecContainerDetached executes a command in a running container, but
+ // does not attach to it. Returns the PID of the exec session and an
+ // error (if starting the exec session failed)
+ ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error
@@ -156,6 +169,9 @@ type ExecOptions struct {
// If provided but set to "", detaching from the container will be
// disabled.
DetachKeys *string
+ // ExitCommand is a command that will be run after the exec session
+ // exits.
+ ExitCommand []string
}
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
new file mode 100644
index 000000000..51819f90a
--- /dev/null
+++ b/libpod/oci_conmon_exec_linux.go
@@ -0,0 +1,599 @@
+package libpod
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/errorhandling"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/utils"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+// ExecContainer executes a command in a running container
+func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
+ if options == nil {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ }
+ if len(options.Cmd) == 0 {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ }
+
+ if sessionID == "" {
+ return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ }
+
+ // TODO: Should we default this to false?
+ // Or maybe make streams mandatory?
+ attachStdin := true
+ if streams != nil {
+ attachStdin = streams.AttachInput
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = c.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // Only close sync pipe. Start and attach are consumed in the attach
+ // goroutine.
+ defer func() {
+ if pipes.syncPipe != nil && !pipes.syncClosed {
+ errorhandling.CloseQuiet(pipes.syncPipe)
+ pipes.syncClosed = true
+ }
+ }()
+
+ // TODO Only create if !detach
+ // Attach to the container before starting it
+ attachChan := make(chan error)
+ go func() {
+ // attachToExec is responsible for closing pipes
+ attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe)
+ close(attachChan)
+ }()
+
+ if err := execCmd.Wait(); err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, attachChan, err
+}
+
+// ExecContainerHTTP executes a new command in an existing container and
+// forwards its standard streams over an attach
+func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+ if streams != nil {
+ if !streams.Stdin && !streams.Stdout && !streams.Stderr {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ }
+ }
+
+ if options == nil {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ }
+
+ detachString := config.DefaultDetachKeys
+ if options.DetachKeys != nil {
+ detachString = *options.DetachKeys
+ }
+ detachKeys, err := processDetachKeys(detachString)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // TODO: Should we default this to false?
+ // Or maybe make streams mandatory?
+ attachStdin := true
+ if streams != nil {
+ attachStdin = streams.Stdin
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = ctr.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // Only close sync pipe. Start and attach are consumed in the attach
+ // goroutine.
+ defer func() {
+ if pipes.syncPipe != nil && !pipes.syncClosed {
+ errorhandling.CloseQuiet(pipes.syncPipe)
+ pipes.syncClosed = true
+ }
+ }()
+
+ attachChan := make(chan error)
+ go func() {
+ // attachToExec is responsible for closing pipes
+ attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel)
+ close(attachChan)
+ }()
+
+ // Wait for conmon to succeed, then return.
+ if err := execCmd.Wait(); err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, attachChan, err
+}
+
+// ExecContainerDetached executes a command in a running container, but does
+// not attach to it.
+func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
+ if options == nil {
+ return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = ctr.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(ctr, sessionID, options, stdin, ociLog)
+ if err != nil {
+ return -1, err
+ }
+
+ defer func() {
+ pipes.cleanup()
+ }()
+
+ // Wait for Conmon to tell us we're ready to attach.
+ // We aren't actually *going* to attach, but this means that we're good
+ // to proceed.
+ if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
+ return -1, err
+ }
+
+ // Start the exec session
+ if err := writeConmonPipeData(pipes.startPipe); err != nil {
+ return -1, err
+ }
+
+ // Wait for conmon to succeed, then return.
+ if err := execCmd.Wait(); err != nil {
+ return -1, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, err
+}
+
+// ExecAttachResize resizes the TTY of the given exec session.
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
+ controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
+ if err != nil {
+ return err
+ }
+ defer controlFile.Close()
+
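+ // Write a resize request (control message type 1) with the new height and width to conmon's ctl file.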
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
+ return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ }
+
+ return nil
+}
+
+// ExecStopContainer stops a given exec session in a running container.
+func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
+
+ // Is the session dead?
+ // Ping the PID with signal 0 to see if it still exists.
+ if err := unix.Kill(pid, 0); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ }
+
+ if timeout > 0 {
+ // Use SIGTERM by default, then SIGKILL after the timeout.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGTERM); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
+ }
+
+ // Wait for the PID to stop
+ if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
+ logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
+ } else {
+ // No error, container is dead
+ return nil
+ }
+ }
+
+ // SIGTERM did not work. On to SIGKILL.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGKILL); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
+ }
+
+ // Wait for the PID to stop
+ if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
+ return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
+ }
+
+ return nil
+}
+
+// ExecUpdateStatus checks if the given exec session is still running.
+func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return false, err
+ }
+
+ logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
+
+ // Is the session dead?
+ // Ping the PID with signal 0 to see if it still exists.
+ if err := unix.Kill(pid, 0); err != nil {
+ if err == unix.ESRCH {
+ return false, nil
+ }
+ return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ }
+
+ return true, nil
+}
+
+// ExecContainerCleanup cleans up files created when a command is run via
+// ExecContainer. This includes the attach socket for the exec session.
+func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
+ // Clean up the sockets dir. Issue #3962
+ // Also ignore if it doesn't exist for some reason; hence the conditional return below
+ if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+// ExecAttachSocketPath is the path to a container's exec session attach socket.
+func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
+ // We don't even use the container here, so don't bother validating it
+ if sessionID == "" {
+ return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
+ }
+
+ return filepath.Join(r.socketsDir, sessionID, "attach"), nil
+}
+
+// This contains pipes used by the exec API.
+type execPipes struct {
+ syncPipe *os.File
+ syncClosed bool
+ startPipe *os.File
+ startClosed bool
+ attachPipe *os.File
+ attachClosed bool
+}
+
+func (p *execPipes) cleanup() {
+ if p.syncPipe != nil && !p.syncClosed {
+ errorhandling.CloseQuiet(p.syncPipe)
+ p.syncClosed = true
+ }
+ if p.startPipe != nil && !p.startClosed {
+ errorhandling.CloseQuiet(p.startPipe)
+ p.startClosed = true
+ }
+ if p.attachPipe != nil && !p.attachClosed {
+ errorhandling.CloseQuiet(p.attachPipe)
+ p.attachClosed = true
+ }
+}
+
+// Start an exec session's conmon parent from the given options.
+func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) {
+ pipes := new(execPipes)
+
+ if options == nil {
+ return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ }
+ if len(options.Cmd) == 0 {
+ return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ }
+
+ if sessionID == "" {
+ return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ }
+
+ // create sync pipe to receive the pid
+ parentSyncPipe, childSyncPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.syncPipe = parentSyncPipe
+
+ defer func() {
+ if deferredErr != nil {
+ pipes.cleanup()
+ }
+ }()
+
+ // create start pipe to set the cgroup before running
+ // attachToExec is responsible for closing parentStartPipe
+ childStartPipe, parentStartPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.startPipe = parentStartPipe
+
+ // create the attach pipe to allow attach socket to be created before
+ // $RUNTIME exec starts running. This is to make sure we can capture all output
+ // from the process through that socket, rather than half reading the log, half attaching to the socket
+ // attachToExec is responsible for closing parentAttachPipe
+ parentAttachPipe, childAttachPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.attachPipe = parentAttachPipe
+
+ childrenClosed := false
+ defer func() {
+ if !childrenClosed {
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ }
+ }()
+
+ runtimeDir, err := util.GetRuntimeDir()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ finalEnv := make([]string, 0, len(options.Env))
+ for k, v := range options.Env {
+ finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
+
+ if options.PreserveFDs > 0 {
+ args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
+ }
+
+ for _, capability := range options.CapAdd {
+ args = append(args, formatRuntimeOpts("--cap", capability)...)
+ }
+
+ if options.Terminal {
+ args = append(args, "-t")
+ }
+
+ if attachStdin {
+ args = append(args, "-i")
+ }
+
+ // Run conmon in exec mode, attach to the session, and pass the prepared process spec
+ args = append(args, "-e")
+ // TODO make this optional when we can detach
+ args = append(args, "--exec-attach")
+ args = append(args, "--exec-process-spec", processFile.Name())
+
+ if len(options.ExitCommand) > 0 {
+ args = append(args, "--exit-command", options.ExitCommand[0])
+ for _, arg := range options.ExitCommand[1:] {
+ args = append(args, []string{"--exit-command-arg", arg}...)
+ }
+ }
+
+ logrus.WithFields(logrus.Fields{
+ "args": args,
+ }).Debugf("running conmon: %s", r.conmonPath)
+ // The conmon command is returned so the caller can wait on it.
+ execCmd := exec.Command(r.conmonPath, args...)
+
+ // TODO: This is commented because it doesn't make much sense in HTTP
+ // attach, and I'm not certain it does for non-HTTP attach as well.
+ // if streams != nil {
+ // // Don't add the InputStream to the execCmd. Instead, the data should be passed
+ // // through CopyDetachable
+ // if streams.AttachOutput {
+ // execCmd.Stdout = options.Streams.OutputStream
+ // }
+ // if streams.AttachError {
+ // execCmd.Stderr = options.Streams.ErrorStream
+ // }
+ // }
+
+ conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if options.PreserveFDs > 0 {
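+ // Hand the user-preserved fds (3 through 3+PreserveFDs-1) to conmon as extra files.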
+ for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
+ }
+ }
+
+ // we don't want to step on the fds the user asked us to preserve
+ // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
+ execCmd.Env = r.conmonEnv
+ execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
+ execCmd.Env = append(execCmd.Env, conmonEnv...)
+
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
+ execCmd.Dir = c.execBundlePath(sessionID)
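+ // Start conmon in its own process group, detached from podman's.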
+ execCmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+
+ err = startCommandGivenSelinux(execCmd)
+
+ // We don't need children pipes on the parent side
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ childrenClosed = true
+
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
+ }
+ if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
+ return nil, nil, err
+ }
+
+ if options.PreserveFDs > 0 {
+ for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
+ // These fds were passed down to the runtime. Close them
+ // so they do not interfere
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
+ }
+ }
+ }
+
+ return execCmd, pipes, nil
+}
+
+// Attach to a container over HTTP
+func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error {
+ if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
+ }
+
+ defer func() {
+ if !pipes.startClosed {
+ errorhandling.CloseQuiet(pipes.startPipe)
+ pipes.startClosed = true
+ }
+ if !pipes.attachClosed {
+ errorhandling.CloseQuiet(pipes.attachPipe)
+ pipes.attachClosed = true
+ }
+ }()
+
+ logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID)
+
+ // set up the socket path, such that it is the correct length and location for exec
+ sockPath, err := c.execAttachSocketPath(sessionID)
+ if err != nil {
+ return err
+ }
+ socketPath := buildSocketPath(sockPath)
+
+ // 2: read from the attach pipe to confirm the parent process has set up the console socket
+ if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
+ return err
+ }
+
+ // 3: then attach
+ conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close socket: %q", err)
+ }
+ }()
+
+ // Make a channel to pass errors back
+ errChan := make(chan error)
+
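+ // Default to attaching all three streams if the caller did not select any.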
+ attachStdout := true
+ attachStderr := true
+ attachStdin := true
+ if streams != nil {
+ attachStdout = streams.Stdout
+ attachStderr = streams.Stderr
+ attachStdin = streams.Stdin
+ }
+
+ // Next, STDIN. Avoid entirely if attachStdin unset.
+ if attachStdin {
+ go func() {
+ logrus.Debugf("Beginning STDIN copy")
+ _, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
+ logrus.Debugf("STDIN copy completed")
+ errChan <- err
+ }()
+ }
+
+ // 4: send start message to child
+ if err := writeConmonPipeData(pipes.startPipe); err != nil {
+ return err
+ }
+
+ // Handle STDOUT/STDERR *after* start message is sent
+ go func() {
+ var err error
+ if isTerminal {
+ // Hack: to emulate Docker, return immediately if attachStdout is not set.
+ // When a terminal is allocated, STDERR goes nowhere; everything goes over STDOUT.
+ // Therefore, if we are not attaching STDOUT, there is nothing to copy here.
+ logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID())
+ if attachStdout {
+ err = httpAttachTerminalCopy(conn, httpBuf, c.ID())
+ }
+ } else {
+ logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID())
+ err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr)
+ }
+ errChan <- err
+ logrus.Debugf("STDOUT/ERR copy completed")
+ }()
+
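+ // Wait for the copy goroutines to finish, or for the caller to cancel the attach.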
+ if cancel != nil {
+ select {
+ case err := <-errChan:
+ return err
+ case <-cancel:
+ return nil
+ }
+ } else {
+ return <-errChan
+ }
+}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index d59ff18ca..9c92b036e 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -635,297 +635,6 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.Te
return nil
}
-// ExecContainer executes a command in a running container
-// TODO: Split into Create/Start/Attach/Wait
-func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
- if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
- }
- if len(options.Cmd) == 0 {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
- }
-
- if sessionID == "" {
- return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
- }
-
- // create sync pipe to receive the pid
- parentSyncPipe, childSyncPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- defer errorhandling.CloseQuiet(parentSyncPipe)
-
- // create start pipe to set the cgroup before running
- // attachToExec is responsible for closing parentStartPipe
- childStartPipe, parentStartPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- // We want to make sure we close the parent{Start,Attach}Pipes if we fail
- // but also don't want to close them after attach to exec is called
- attachToExecCalled := false
-
- defer func() {
- if !attachToExecCalled {
- errorhandling.CloseQuiet(parentStartPipe)
- }
- }()
-
- // create the attach pipe to allow attach socket to be created before
- // $RUNTIME exec starts running. This is to make sure we can capture all output
- // from the process through that socket, rather than half reading the log, half attaching to the socket
- // attachToExec is responsible for closing parentAttachPipe
- parentAttachPipe, childAttachPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- defer func() {
- if !attachToExecCalled {
- errorhandling.CloseQuiet(parentAttachPipe)
- }
- }()
-
- childrenClosed := false
- defer func() {
- if !childrenClosed {
- errorhandling.CloseQuiet(childSyncPipe)
- errorhandling.CloseQuiet(childAttachPipe)
- errorhandling.CloseQuiet(childStartPipe)
- }
- }()
-
- runtimeDir, err := util.GetRuntimeDir()
- if err != nil {
- return -1, nil, err
- }
-
- finalEnv := make([]string, 0, len(options.Env))
- for k, v := range options.Env {
- finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
- }
-
- processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
- if err != nil {
- return -1, nil, err
- }
-
- var ociLog string
- if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
- ociLog = c.execOCILog(sessionID)
- }
- args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
-
- if options.PreserveFDs > 0 {
- args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
- }
-
- for _, capability := range options.CapAdd {
- args = append(args, formatRuntimeOpts("--cap", capability)...)
- }
-
- if options.Terminal {
- args = append(args, "-t")
- }
-
- if options.Streams != nil && options.Streams.AttachInput {
- args = append(args, "-i")
- }
-
- // Append container ID and command
- args = append(args, "-e")
- // TODO make this optional when we can detach
- args = append(args, "--exec-attach")
- args = append(args, "--exec-process-spec", processFile.Name())
-
- logrus.WithFields(logrus.Fields{
- "args": args,
- }).Debugf("running conmon: %s", r.conmonPath)
- execCmd := exec.Command(r.conmonPath, args...)
-
- if options.Streams != nil {
- // Don't add the InputStream to the execCmd. Instead, the data should be passed
- // through CopyDetachable
- if options.Streams.AttachOutput {
- execCmd.Stdout = options.Streams.OutputStream
- }
- if options.Streams.AttachError {
- execCmd.Stderr = options.Streams.ErrorStream
- }
- }
-
- conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
- if err != nil {
- return -1, nil, err
- }
-
- if options.PreserveFDs > 0 {
- for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
- }
- }
-
- // we don't want to step on users fds they asked to preserve
- // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
- execCmd.Env = r.conmonEnv
- execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
- execCmd.Env = append(execCmd.Env, conmonEnv...)
-
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
- execCmd.Dir = c.execBundlePath(sessionID)
- execCmd.SysProcAttr = &syscall.SysProcAttr{
- Setpgid: true,
- }
-
- err = startCommandGivenSelinux(execCmd)
-
- // We don't need children pipes on the parent side
- errorhandling.CloseQuiet(childSyncPipe)
- errorhandling.CloseQuiet(childAttachPipe)
- errorhandling.CloseQuiet(childStartPipe)
- childrenClosed = true
-
- if err != nil {
- return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
- }
- if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
- return -1, nil, err
- }
-
- if options.PreserveFDs > 0 {
- for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- // These fds were passed down to the runtime. Close them
- // and not interfere
- if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
- logrus.Debugf("unable to close file fd-%d", fd)
- }
- }
- }
-
- // TODO Only create if !detach
- // Attach to the container before starting it
- attachChan := make(chan error)
- go func() {
- // attachToExec is responsible for closing pipes
- attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe)
- close(attachChan)
- }()
- attachToExecCalled = true
-
- if err := execCmd.Wait(); err != nil {
- return -1, nil, errors.Wrapf(err, "cannot run conmon")
- }
-
- pid, err := readConmonPipeData(parentSyncPipe, ociLog)
-
- return pid, attachChan, err
-}
-
-// ExecAttachResize resizes the TTY of the given exec session.
-func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
- controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
- if err != nil {
- return err
- }
- defer controlFile.Close()
-
- if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
- }
-
- return nil
-}
-
-// ExecStopContainer stops a given exec session in a running container.
-func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
- pid, err := ctr.getExecSessionPID(sessionID)
- if err != nil {
- return err
- }
-
- logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
-
- // Is the session dead?
- // Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(pid, 0); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
- }
-
- if timeout > 0 {
- // Use SIGTERM by default, then SIGSTOP after timeout.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
- if err := unix.Kill(pid, unix.SIGTERM); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
- }
-
- // Wait for the PID to stop
- if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
- logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
- } else {
- // No error, container is dead
- return nil
- }
- }
-
- // SIGTERM did not work. On to SIGKILL.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
- if err := unix.Kill(pid, unix.SIGTERM); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
- }
-
- // Wait for the PID to stop
- if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
- return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
- }
-
- return nil
-}
-
-// ExecUpdateStatus checks if the given exec session is still running.
-func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
- pid, err := ctr.getExecSessionPID(sessionID)
- if err != nil {
- return false, err
- }
-
- logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
-
- // Is the session dead?
- // Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(pid, 0); err != nil {
- if err == unix.ESRCH {
- return false, nil
- }
- return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
- }
-
- return true, nil
-}
-
-// ExecContainerCleanup cleans up files created when a command is run via
-// ExecContainer. This includes the attach socket for the exec session.
-func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
- // Clean up the sockets dir. Issue #3962
- // Also ignore if it doesn't exist for some reason; hence the conditional return below
- if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
- return err
- }
- return nil
-}
-
// CheckpointContainer checkpoints the given container.
func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
@@ -1002,16 +711,6 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil
}
-// ExecAttachSocketPath is the path to a container's exec session attach socket.
-func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
- // We don't even use container, so don't validity check it
- if sessionID == "" {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
- }
-
- return filepath.Join(r.socketsDir, sessionID, "attach"), nil
-}
-
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
@@ -1415,15 +1114,21 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o
// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logTag string) []string {
// set the conmon API version to be able to use the correct sync struct keys
- args := []string{"--api-version", "1"}
+ args := []string{
+ "--api-version", "1",
+ "-c", ctr.ID(),
+ "-u", cuuid,
+ "-r", r.path,
+ "-b", bundlePath,
+ "-p", pidPath,
+ "-n", ctr.Name(),
+ "--exit-dir", exitDir,
+ "--socket-dir-path", r.socketsDir,
+ }
+
if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups {
args = append(args, "-s")
}
- args = append(args, "-c", ctr.ID())
- args = append(args, "-u", cuuid)
- args = append(args, "-r", r.path)
- args = append(args, "-b", bundlePath)
- args = append(args, "-p", pidPath)
var logDriver string
switch ctr.LogDriver() {
@@ -1444,8 +1149,6 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
}
args = append(args, "-l", logDriver)
- args = append(args, "--exit-dir", exitDir)
- args = append(args, "--socket-dir-path", r.socketsDir)
if r.logSizeMax >= 0 {
args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
}
@@ -1704,6 +1407,8 @@ func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid
buf := make([]byte, bufferSize)
for {
numR, err := container.Read(buf)
+ logrus.Debugf("Read fd(%d) %d/%d bytes for container %s", int(buf[0]), numR, len(buf), cid)
+
if numR > 0 {
switch buf[0] {
case AttachPipeStdout:
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 172805b0d..4da16876c 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -121,10 +121,20 @@ func (r *MissingRuntime) AttachResize(ctr *Container, newSize remotecommand.Term
}
// ExecContainer is not available as the runtime is missing
-func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
+func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
return -1, nil, r.printError()
}
+// ExecContainerHTTP is not available as the runtime is missing
+func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+ return -1, nil, r.printError()
+}
+
+// ExecContainerDetached is not available as the runtime is missing
+func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
+ return -1, r.printError()
+}
+
// ExecAttachResize is not available as the runtime is missing.
func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
return r.printError()
diff --git a/libpod/options.go b/libpod/options.go
index 05241baf3..ff5e29335 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -4,7 +4,6 @@ import (
"net"
"os"
"path/filepath"
- "regexp"
"syscall"
"github.com/containers/common/pkg/config"
@@ -20,15 +19,6 @@ import (
"github.com/pkg/errors"
)
-var (
- // NameRegex is a regular expression to validate container/pod names.
- // This must NOT be changed from outside of Libpod. It should be a
- // constant, but Go won't let us do that.
- NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
- // RegexError is thrown in presence of an invalid container/pod name.
- RegexError = errors.Wrapf(define.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
-)
-
// Runtime Creation Options
// WithStorageConfig uses the given configuration to set up container storage.
@@ -665,8 +655,8 @@ func WithName(name string) CtrCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
ctr.config.Name = name
@@ -1383,8 +1373,8 @@ func WithVolumeName(name string) VolumeCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
volume.config.Name = name
@@ -1502,8 +1492,8 @@ func WithPodName(name string) PodCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
pod.config.Name = name
@@ -1520,8 +1510,8 @@ func WithPodHostname(hostname string) PodCreateOption {
}
// Check the hostname against a regex
- if !NameRegex.MatchString(hostname) {
- return RegexError
+ if !define.NameRegex.MatchString(hostname) {
+ return define.RegexError
}
pod.config.Hostname = hostname
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 1d880531e..655b42e51 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -390,6 +390,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ logrus.Debugf("Removing container %s", c.ID())
+
// We need to lock the pod before we lock the container.
// To avoid races around removing a container and the pod it is in.
// Don't need to do this in pod removal case - we're evicting the entire
@@ -488,20 +490,25 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ var cleanupErr error
+
+ // Clean up network namespace, cgroups, mounts.
+ // Do this before we set ContainerStateRemoving, to ensure that we can
+ // actually remove from the OCI runtime.
+ if err := c.cleanup(ctx); err != nil {
+ cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ }
+
// Set ContainerStateRemoving
c.state.State = define.ContainerStateRemoving
if err := c.save(); err != nil {
+ if cleanupErr != nil {
+ logrus.Errorf(err.Error())
+ }
return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
}
- var cleanupErr error
-
- // Clean up network namespace, cgroups, mounts
- if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
- }
-
// Stop the container's storage
if err := c.teardownStorage(); err != nil {
if cleanupErr == nil {
diff --git a/libpod/util.go b/libpod/util.go
index bdfd153ed..ba9f1fa05 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -249,9 +249,8 @@ func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon
// length and stream. Accepts an integer indicating which stream we are sending
// to (STDIN = 0, STDOUT = 1, STDERR = 2).
func makeHTTPAttachHeader(stream byte, length uint32) []byte {
- headerBuf := []byte{stream, 0, 0, 0}
- lenBuf := []byte{0, 0, 0, 0}
- binary.BigEndian.PutUint32(lenBuf, length)
- headerBuf = append(headerBuf, lenBuf...)
- return headerBuf
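+ // Layout: byte 0 is the stream ID, bytes 1-3 are zero padding, bytes 4-7 hold the big-endian payload length.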
+ header := make([]byte, 8)
+ header[0] = stream
+ binary.BigEndian.PutUint32(header[4:], length)
+ return header
}
diff --git a/nix/default.nix b/nix/default.nix
index 211caee93..cf607c0ad 100644
--- a/nix/default.nix
+++ b/nix/default.nix
@@ -43,7 +43,7 @@ let
]);
src = ./..;
EXTRA_LDFLAGS = ''-linkmode external -extldflags "-static -lm"'';
- BUILDTAGS = ''static apparmor selinux seccomp systemd varlink containers_image_ostree_stub'';
+ BUILDTAGS = ''static netgo apparmor selinux seccomp systemd varlink containers_image_ostree_stub'';
})).override {
gpgme = (static pkgs.gpgme);
libseccomp = (static pkgs.libseccomp);
diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go
index 80ad52aee..012e20daf 100644
--- a/pkg/api/handlers/compat/containers_attach.go
+++ b/pkg/api/handlers/compat/containers_attach.go
@@ -10,9 +10,14 @@ import (
"github.com/gorilla/schema"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "k8s.io/client-go/tools/remotecommand"
)
+// AttachHeader is the literal header sent for upgraded/hijacked connections for
+// attach, sourced from Docker at:
+// https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go
+// It is used verbatim to ensure compatibility with existing clients.
+const AttachHeader = "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n"
+
func AttachContainer(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
@@ -89,7 +94,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
return
}
} else if !(state == define.ContainerStateCreated || state == define.ContainerStateRunning) {
- utils.InternalServerError(w, errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers"))
+ utils.InternalServerError(w, errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers - currently in state %s", state.String()))
return
}
@@ -106,10 +111,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
return
}
- // This header string sourced from Docker:
- // https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go
- // Using literally to ensure compatability with existing clients.
- fmt.Fprintf(connection, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
+ fmt.Fprintf(connection, AttachHeader)
logrus.Debugf("Hijack for attach of container %s successful", ctr.ID())
@@ -124,38 +126,3 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("Attach for container %s completed successfully", ctr.ID())
}
-
-func ResizeContainer(w http.ResponseWriter, r *http.Request) {
- runtime := r.Context().Value("runtime").(*libpod.Runtime)
- decoder := r.Context().Value("decoder").(*schema.Decoder)
-
- query := struct {
- Height uint16 `schema:"h"`
- Width uint16 `schema:"w"`
- }{}
- if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- // This is not a 400, despite the fact that is should be, for
- // compatibility reasons.
- utils.InternalServerError(w, errors.Wrapf(err, "error parsing query options"))
- return
- }
-
- name := utils.GetName(r)
- ctr, err := runtime.LookupContainer(name)
- if err != nil {
- utils.ContainerNotFound(w, name, err)
- return
- }
-
- newSize := remotecommand.TerminalSize{
- Width: query.Width,
- Height: query.Height,
- }
- if err := ctr.AttachResize(newSize); err != nil {
- utils.InternalServerError(w, err)
- return
- }
- // This is not a 204, even though we write nothing, for compatibility
- // reasons.
- utils.WriteResponse(w, http.StatusOK, "")
-}
diff --git a/pkg/api/handlers/compat/containers_start.go b/pkg/api/handlers/compat/containers_start.go
index 67bd287ab..cdbc8ff76 100644
--- a/pkg/api/handlers/compat/containers_start.go
+++ b/pkg/api/handlers/compat/containers_start.go
@@ -3,11 +3,12 @@ package compat
import (
"net/http"
+ "github.com/sirupsen/logrus"
+
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func StartContainer(w http.ResponseWriter, r *http.Request) {
@@ -23,8 +24,7 @@ func StartContainer(w http.ResponseWriter, r *http.Request) {
}
if len(query.DetachKeys) > 0 {
// TODO - start does not support adding detach keys
- utils.BadRequest(w, "detachKeys", query.DetachKeys, errors.New("the detachKeys parameter is not supported yet"))
- return
+ logrus.Info("the detach keys parameter is not supported on start container")
}
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
@@ -33,7 +33,6 @@ func StartContainer(w http.ResponseWriter, r *http.Request) {
utils.ContainerNotFound(w, name, err)
return
}
-
state, err := con.State()
if err != nil {
utils.InternalServerError(w, err)
@@ -43,7 +42,7 @@ func StartContainer(w http.ResponseWriter, r *http.Request) {
utils.WriteResponse(w, http.StatusNotModified, "")
return
}
- if err := con.Start(r.Context(), false); err != nil {
+ if err := con.Start(r.Context(), len(con.PodID()) > 0); err != nil {
utils.InternalServerError(w, err)
return
}
diff --git a/pkg/api/handlers/compat/container_start.go b/pkg/api/handlers/compat/containers_stop.go
index d26ef2c82..d26ef2c82 100644
--- a/pkg/api/handlers/compat/container_start.go
+++ b/pkg/api/handlers/compat/containers_stop.go
diff --git a/pkg/api/handlers/compat/exec.go b/pkg/api/handlers/compat/exec.go
index ec1a8ac96..6865a3319 100644
--- a/pkg/api/handlers/compat/exec.go
+++ b/pkg/api/handlers/compat/exec.go
@@ -104,4 +104,76 @@ func ExecInspectHandler(w http.ResponseWriter, r *http.Request) {
}
utils.WriteResponse(w, http.StatusOK, inspectOut)
+
+ // Only for the Compat API: we want to remove sessions that were
+ // stopped. This is very hacky, but should suffice for now.
+ if !utils.IsLibpodRequest(r) && inspectOut.CanRemove {
+ logrus.Infof("Pruning stale exec session %s from container %s", sessionID, sessionCtr.ID())
+ if err := sessionCtr.ExecRemove(sessionID, false); err != nil && errors.Cause(err) != define.ErrNoSuchExecSession {
+ logrus.Errorf("Error removing stale exec session %s from container %s: %v", sessionID, sessionCtr.ID(), err)
+ }
+ }
+}
+
+// ExecStartHandler runs a given exec session.
+func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+
+ sessionID := mux.Vars(r)["id"]
+
+ // TODO: We should read/support Tty and Detach from here.
+ bodyParams := new(handlers.ExecStartConfig)
+
+ if err := json.NewDecoder(r.Body).Decode(&bodyParams); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "failed to decode parameters for %s", r.URL.String()))
+ return
+ }
+ if bodyParams.Detach {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Errorf("Detached exec is not yet supported"))
+ return
+ }
+ // TODO: Verify TTY setting against what inspect session was made with
+
+ sessionCtr, err := runtime.GetExecSessionContainer(sessionID)
+ if err != nil {
+ utils.Error(w, fmt.Sprintf("No such exec session: %s", sessionID), http.StatusNotFound, err)
+ return
+ }
+
+ logrus.Debugf("Starting exec session %s of container %s", sessionID, sessionCtr.ID())
+
+ state, err := sessionCtr.State()
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if state != define.ContainerStateRunning {
+ utils.Error(w, http.StatusText(http.StatusConflict), http.StatusConflict, errors.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
+ return
+ }
+
+ // Hijack the connection
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ utils.InternalServerError(w, errors.Errorf("unable to hijack connection"))
+ return
+ }
+
+ connection, buffer, err := hijacker.Hijack()
+ if err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "error hijacking connection"))
+ return
+ }
+
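+ // Write the raw-stream upgrade header before any exec output is copied onto the hijacked connection.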
+ fmt.Fprintf(connection, AttachHeader)
+
+ logrus.Debugf("Hijack for attach of container %s exec session %s successful", sessionCtr.ID(), sessionID)
+
+ if err := sessionCtr.ExecHTTPStartAndAttach(sessionID, connection, buffer, nil, nil, nil); err != nil {
+ logrus.Errorf("Error attaching to container %s exec session %s: %v", sessionCtr.ID(), sessionID, err)
+ }
+
+ logrus.Debugf("Attach for container %s exec session %s completed successfully", sessionCtr.ID(), sessionID)
}
diff --git a/pkg/api/handlers/compat/ping.go b/pkg/api/handlers/compat/ping.go
index 6e77e270f..abee3d8e8 100644
--- a/pkg/api/handlers/compat/ping.go
+++ b/pkg/api/handlers/compat/ping.go
@@ -5,22 +5,22 @@ import (
"net/http"
"github.com/containers/buildah"
- "github.com/containers/libpod/pkg/api/handlers"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
)
// Ping returns headers to client about the service
//
// This handler must always be the same for the compatibility and libpod URL trees!
// Clients will use the Header availability to test which backend engine is in use.
+// Note: this handler additionally supports both GET and HEAD methods
func Ping(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("API-Version", handlers.DefaultApiVersion)
+ w.Header().Set("API-Version", utils.ApiVersion[utils.CompatTree][utils.CurrentApiVersion].String())
w.Header().Set("BuildKit-Version", "")
w.Header().Set("Docker-Experimental", "true")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Pragma", "no-cache")
- // API-Version and Libpod-API-Version may not always be equal
- w.Header().Set("Libpod-API-Version", handlers.DefaultApiVersion)
+ w.Header().Set("Libpod-API-Version", utils.ApiVersion[utils.LibpodTree][utils.CurrentApiVersion].String())
w.Header().Set("Libpod-Buildha-Version", buildah.Version)
w.WriteHeader(http.StatusOK)
diff --git a/pkg/api/handlers/compat/resize.go b/pkg/api/handlers/compat/resize.go
new file mode 100644
index 000000000..3ead733bc
--- /dev/null
+++ b/pkg/api/handlers/compat/resize.go
@@ -0,0 +1,68 @@
+package compat
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/gorilla/schema"
+ "github.com/pkg/errors"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+func ResizeTTY(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+
+ // /containers/{id}/resize
+ query := struct {
+ Height uint16 `schema:"h"`
+ Width uint16 `schema:"w"`
+ }{
+ // override any golang type defaults
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ sz := remotecommand.TerminalSize{
+ Width: query.Width,
+ Height: query.Height,
+ }
+
+ var status int
+ name := utils.GetName(r)
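+ // The same handler serves container resize and exec-session resize; dispatch on the URL path.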
+ switch {
+ case strings.Contains(r.URL.Path, "/containers/"):
+ ctnr, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ if err := ctnr.AttachResize(sz); err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
+ return
+ }
+ // This is not a 204, even though we write nothing, for compatibility
+ // reasons.
+ status = http.StatusOK
+ case strings.Contains(r.URL.Path, "/exec/"):
+ ctnr, err := runtime.GetExecSessionContainer(name)
+ if err != nil {
+ utils.SessionNotFound(w, name, err)
+ return
+ }
+ if err := ctnr.ExecResize(name, sz); err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "cannot resize session"))
+ return
+ }
+ // This is not a 204, even though we write nothing, for compatibility
+ // reasons.
+ status = http.StatusCreated
+ }
+ utils.WriteResponse(w, status, "")
+}
diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go
index 8786f1d5b..bfc226bb8 100644
--- a/pkg/api/handlers/compat/version.go
+++ b/pkg/api/handlers/compat/version.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
- "github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/domain/entities"
docker "github.com/docker/docker/api/types"
@@ -35,34 +34,35 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) {
Name: "Podman Engine",
Version: versionInfo.Version,
Details: map[string]string{
- "APIVersion": handlers.DefaultApiVersion,
+ "APIVersion": utils.ApiVersion[utils.LibpodTree][utils.CurrentApiVersion].String(),
"Arch": goRuntime.GOARCH,
"BuildTime": time.Unix(versionInfo.Built, 0).Format(time.RFC3339),
"Experimental": "true",
"GitCommit": versionInfo.GitCommit,
"GoVersion": versionInfo.GoVersion,
"KernelVersion": infoData.Host.Kernel,
- "MinAPIVersion": handlers.MinimalApiVersion,
+ "MinAPIVersion": utils.ApiVersion[utils.LibpodTree][utils.MinimalApiVersion].String(),
"Os": goRuntime.GOOS,
},
}}
- utils.WriteResponse(w, http.StatusOK, entities.ComponentVersion{Version: docker.Version{
- Platform: struct {
- Name string
- }{
- Name: fmt.Sprintf("%s/%s/%s-%s", goRuntime.GOOS, goRuntime.GOARCH, infoData.Host.Distribution.Distribution, infoData.Host.Distribution.Version),
- },
- APIVersion: components[0].Details["APIVersion"],
- Arch: components[0].Details["Arch"],
- BuildTime: components[0].Details["BuildTime"],
- Components: components,
- Experimental: true,
- GitCommit: components[0].Details["GitCommit"],
- GoVersion: components[0].Details["GoVersion"],
- KernelVersion: components[0].Details["KernelVersion"],
- MinAPIVersion: components[0].Details["MinAPIVersion"],
- Os: components[0].Details["Os"],
- Version: components[0].Version,
- }})
+ utils.WriteResponse(w, http.StatusOK, entities.ComponentVersion{
+ Version: docker.Version{
+ Platform: struct {
+ Name string
+ }{
+ Name: fmt.Sprintf("%s/%s/%s-%s", goRuntime.GOOS, goRuntime.GOARCH, infoData.Host.Distribution.Distribution, infoData.Host.Distribution.Version),
+ },
+ APIVersion: components[0].Details["APIVersion"],
+ Arch: components[0].Details["Arch"],
+ BuildTime: components[0].Details["BuildTime"],
+ Components: components,
+ Experimental: true,
+ GitCommit: components[0].Details["GitCommit"],
+ GoVersion: components[0].Details["GoVersion"],
+ KernelVersion: components[0].Details["KernelVersion"],
+ MinAPIVersion: components[0].Details["MinAPIVersion"],
+ Os: components[0].Details["Os"],
+ Version: components[0].Version,
+ }})
}
diff --git a/pkg/api/handlers/handler.go b/pkg/api/handlers/handler.go
deleted file mode 100644
index 2dd2c886b..000000000
--- a/pkg/api/handlers/handler.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package handlers
-
-const (
- DefaultApiVersion = "1.40" // See https://docs.docker.com/engine/api/v1.40/
- MinimalApiVersion = "1.24"
-)
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index 40b6cacdb..71f440bce 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -5,10 +5,9 @@ import (
"encoding/json"
"net/http"
- "github.com/containers/libpod/pkg/domain/entities"
-
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/specgen"
"github.com/containers/libpod/pkg/specgen/generate"
"github.com/pkg/errors"
diff --git a/pkg/api/handlers/libpod/healthcheck.go b/pkg/api/handlers/libpod/healthcheck.go
index 6eb2ab0e3..0ca3574b7 100644
--- a/pkg/api/handlers/libpod/healthcheck.go
+++ b/pkg/api/handlers/libpod/healthcheck.go
@@ -4,6 +4,7 @@ import (
"net/http"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/api/handlers/utils"
)
@@ -12,32 +13,27 @@ func RunHealthCheck(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
status, err := runtime.HealthCheck(name)
if err != nil {
- if status == libpod.HealthCheckContainerNotFound {
+ if status == define.HealthCheckContainerNotFound {
utils.ContainerNotFound(w, name, err)
return
}
- if status == libpod.HealthCheckNotDefined {
+ if status == define.HealthCheckNotDefined {
utils.Error(w, "no healthcheck defined", http.StatusConflict, err)
return
}
- if status == libpod.HealthCheckContainerStopped {
+ if status == define.HealthCheckContainerStopped {
utils.Error(w, "container not running", http.StatusConflict, err)
return
}
utils.InternalServerError(w, err)
return
}
- ctr, err := runtime.LookupContainer(name)
- if err != nil {
- utils.InternalServerError(w, err)
- return
+ hcStatus := define.HealthCheckUnhealthy
+ if status == define.HealthCheckSuccess {
+ hcStatus = define.HealthCheckHealthy
}
-
- hcLog, err := ctr.GetHealthCheckLog()
- if err != nil {
- utils.InternalServerError(w, err)
- return
+ report := define.HealthCheckResults{
+ Status: hcStatus,
}
-
- utils.WriteResponse(w, http.StatusOK, hcLog)
+ utils.WriteResponse(w, http.StatusOK, report)
}
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 93b4564a1..1cbcfb52c 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -56,13 +56,6 @@ func ImageExists(w http.ResponseWriter, r *http.Request) {
func ImageTree(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
-
- img, err := runtime.ImageRuntime().NewFromLocal(name)
- if err != nil {
- utils.Error(w, "Something went wrong.", http.StatusNotFound, errors.Wrapf(err, "Failed to find image %s", name))
- return
- }
-
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
WhatRequires bool `schema:"whatrequires"`
@@ -74,14 +67,18 @@ func ImageTree(w http.ResponseWriter, r *http.Request) {
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
return
}
-
- tree, err := img.GenerateTree(query.WhatRequires)
+ ir := abi.ImageEngine{Libpod: runtime}
+ options := entities.ImageTreeOptions{WhatRequires: query.WhatRequires}
+ report, err := ir.Tree(r.Context(), name, options)
if err != nil {
+ if errors.Cause(err) == define.ErrNoSuchImage {
+ utils.Error(w, "Something went wrong.", http.StatusNotFound, errors.Wrapf(err, "Failed to find image %s", name))
+ return
+ }
utils.Error(w, "Server error", http.StatusInternalServerError, errors.Wrapf(err, "failed to generate image tree for %s", name))
return
}
-
- utils.WriteResponse(w, http.StatusOK, tree)
+ utils.WriteResponse(w, http.StatusOK, report)
}
func GetImage(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go
index e8a92e93e..7de285e5e 100644
--- a/pkg/api/handlers/libpod/networks.go
+++ b/pkg/api/handlers/libpod/networks.go
@@ -1,39 +1,59 @@
package libpod
import (
+ "encoding/json"
"net/http"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
"github.com/containers/libpod/pkg/network"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
-func CreateNetwork(w http.ResponseWriter, r *http.Request) {}
-func ListNetworks(w http.ResponseWriter, r *http.Request) {
+func CreateNetwork(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- config, err := runtime.GetConfig()
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ options := entities.NetworkCreateOptions{}
+ if err := json.NewDecoder(r.Body).Decode(&options); err != nil {
+ utils.Error(w, "unable to marshall input", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ return
+ }
+ query := struct {
+ Name string `schema:"name"`
+ }{
+ // override any golang type defaults
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ ic := abi.ContainerEngine{Libpod: runtime}
+ report, err := ic.NetworkCreate(r.Context(), query.Name, options)
if err != nil {
utils.InternalServerError(w, err)
return
}
- configDir := config.Network.NetworkConfigDir
- if len(configDir) < 1 {
- configDir = network.CNIConfigDir
- }
- networks, err := network.LoadCNIConfsFromDir(configDir)
+ utils.WriteResponse(w, http.StatusOK, report)
+
+}
+func ListNetworks(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ options := entities.NetworkListOptions{}
+ ic := abi.ContainerEngine{Libpod: runtime}
+ reports, err := ic.NetworkList(r.Context(), options)
if err != nil {
utils.InternalServerError(w, err)
return
}
- utils.WriteResponse(w, http.StatusOK, networks)
+ utils.WriteResponse(w, http.StatusOK, reports)
}
func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
- // 200 ok
- // 404 no such
- // 500 internal
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
Force bool `schema:"force"`
@@ -46,22 +66,30 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
return
}
name := utils.GetName(r)
- if err := network.RemoveNetwork(name); err != nil {
+
+ options := entities.NetworkRmOptions{
+ Force: query.Force,
+ }
+ ic := abi.ContainerEngine{Libpod: runtime}
+ reports, err := ic.NetworkRm(r.Context(), []string{name}, options)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if reports[0].Err != nil {
// If the network cannot be found, we return a 404.
if errors.Cause(err) == network.ErrNetworkNotFound {
utils.Error(w, "Something went wrong", http.StatusNotFound, err)
return
}
- utils.InternalServerError(w, err)
- return
}
- utils.WriteResponse(w, http.StatusOK, "")
+ utils.WriteResponse(w, http.StatusOK, reports)
}
func InspectNetwork(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
- Force bool `schema:"force"`
}{
// override any golang type defaults
}
@@ -71,7 +99,9 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
return
}
name := utils.GetName(r)
- n, err := network.InspectNetwork(name)
+ options := entities.NetworkInspectOptions{}
+ ic := abi.ContainerEngine{Libpod: runtime}
+ reports, err := ic.NetworkInspect(r.Context(), []string{name}, options)
if err != nil {
// If the network cannot be found, we return a 404.
if errors.Cause(err) == network.ErrNetworkNotFound {
@@ -81,5 +111,5 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- utils.WriteResponse(w, http.StatusOK, n)
+ utils.WriteResponse(w, http.StatusOK, reports)
}
diff --git a/pkg/api/handlers/libpod/swagger.go b/pkg/api/handlers/libpod/swagger.go
index 46426eb6b..057fbfb41 100644
--- a/pkg/api/handlers/libpod/swagger.go
+++ b/pkg/api/handlers/libpod/swagger.go
@@ -91,6 +91,34 @@ type swagInfoResponse struct {
Body define.Info
}
+// Network rm
+// swagger:response NetworkRmReport
+type swagNetworkRmReport struct {
+ // in:body
+ Body entities.NetworkRmReport
+}
+
+// Network inspect
+// swagger:response NetworkInspectReport
+type swagNetworkInspectReport struct {
+ // in:body
+ Body []entities.NetworkInspectReport
+}
+
+// Network list
+// swagger:response NetworkListReport
+type swagNetworkListReport struct {
+ // in:body
+ Body []entities.NetworkListReport
+}
+
+// Network create
+// swagger:response NetworkCreateReport
+type swagNetworkCreateReport struct {
+ // in:body
+ Body entities.NetworkCreateReport
+}
+
func ServeSwagger(w http.ResponseWriter, r *http.Request) {
path := DefaultPodmanSwaggerSpec
if p, found := os.LookupEnv("PODMAN_SWAGGER_SPEC"); found {
diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go
index 98e33bf10..81ed37b4a 100644
--- a/pkg/api/handlers/libpod/system.go
+++ b/pkg/api/handlers/libpod/system.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/libpod/pkg/api/handlers/compat"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
@@ -69,3 +70,25 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) {
}
utils.WriteResponse(w, http.StatusOK, systemPruneReport)
}
+
+// SystemReset Resets podman storage back to default state
+func SystemReset(w http.ResponseWriter, r *http.Request) {
+ err := r.Context().Value("runtime").(*libpod.Runtime).Reset(r.Context())
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, nil)
+}
+
+func DiskUsage(w http.ResponseWriter, r *http.Request) {
+ // Options are only used by the CLI
+ options := entities.SystemDfOptions{}
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ ic := abi.ContainerEngine{Libpod: runtime}
+ response, err := ic.SystemDf(r.Context(), options)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ }
+ utils.WriteResponse(w, http.StatusOK, response)
+}
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index 2075d29df..d8cdd9caf 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -170,6 +170,11 @@ type ExecCreateResponse struct {
docker.IDResponse
}
+type ExecStartConfig struct {
+ Detach bool `json:"Detach"`
+ Tty bool `json:"Tty"`
+}
+
func ImageToImageSummary(l *libpodImage.Image) (*entities.ImageSummary, error) {
containers, err := l.Containers()
if err != nil {
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index 3253a9be3..c17720694 100644
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -63,6 +63,14 @@ func PodNotFound(w http.ResponseWriter, name string, err error) {
Error(w, msg, http.StatusNotFound, err)
}
+func SessionNotFound(w http.ResponseWriter, name string, err error) {
+ if errors.Cause(err) != define.ErrNoSuchExecSession {
+ InternalServerError(w, err)
+ }
+ msg := fmt.Sprintf("No such exec session: %s", name)
+ Error(w, msg, http.StatusNotFound, err)
+}
+
func ContainerNotRunning(w http.ResponseWriter, containerID string, err error) {
msg := fmt.Sprintf("Container %s is not running", containerID)
Error(w, msg, http.StatusConflict, err)
diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go
index b5bd488fb..2f4a54b98 100644
--- a/pkg/api/handlers/utils/handler.go
+++ b/pkg/api/handlers/utils/handler.go
@@ -9,11 +9,55 @@ import (
"os"
"strings"
+ "github.com/blang/semver"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+type (
+ // VersionTree determines which API endpoint tree a version applies to
+ VersionTree int
+ // VersionLevel determines which API level is meant, the current one or the oldest still supported
+ VersionLevel int
+)
+
+const (
+ // LibpodTree supports Libpod endpoints
+ LibpodTree = VersionTree(iota)
+ // CompatTree supports Docker-compatible endpoints
+ CompatTree
+
+ // CurrentApiVersion announces the current API level
+ CurrentApiVersion = VersionLevel(iota)
+ // MinimalApiVersion announces the oldest API level supported
+ MinimalApiVersion
+)
+
+var (
+ // See https://docs.docker.com/engine/api/v1.40/
+ // libpod compat handlers are expected to honor docker API versions
+
+ // ApiVersion provides the current and minimal API versions for compat and libpod endpoint trees
+ // Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow
+ // clients to shop for the Version they wish to support
+ ApiVersion = map[VersionTree]map[VersionLevel]semver.Version{
+ LibpodTree: {
+ CurrentApiVersion: semver.MustParse("1.0.0"),
+ MinimalApiVersion: semver.MustParse("1.0.0"),
+ },
+ CompatTree: {
+ CurrentApiVersion: semver.MustParse("1.40.0"),
+ MinimalApiVersion: semver.MustParse("1.24.0"),
+ },
+ }
+
+ // ErrVersionNotGiven returned when version not given by client
+ ErrVersionNotGiven = errors.New("version not given in URL path")
+ // ErrVersionNotSupported returned when given version is too old
+ ErrVersionNotSupported = errors.New("given version is not supported")
+)
+
// IsLibpodRequest returns true if the request related to a libpod endpoint
// (e.g., /v2/libpod/...).
func IsLibpodRequest(r *http.Request) bool {
@@ -21,6 +65,48 @@ func IsLibpodRequest(r *http.Request) bool {
return len(split) >= 3 && split[2] == "libpod"
}
+// SupportedVersion validates that the version provided by the client is included in the given condition
+// https://github.com/blang/semver#ranges provides the details for writing conditions
+// If a version is not given in URL path, ErrVersionNotGiven is returned
+func SupportedVersion(r *http.Request, condition string) (semver.Version, error) {
+ version := semver.Version{}
+ val, ok := mux.Vars(r)["version"]
+ if !ok {
+ return version, ErrVersionNotGiven
+ }
+ safeVal, err := url.PathUnescape(val)
+ if err != nil {
+ return version, errors.Wrapf(err, "unable to unescape given API version: %q", val)
+ }
+ version, err = semver.ParseTolerant(safeVal)
+ if err != nil {
+ return version, errors.Wrapf(err, "unable to parse given API version: %q from %q", safeVal, val)
+ }
+
+ inRange, err := semver.ParseRange(condition)
+ if err != nil {
+ return version, err
+ }
+
+ if inRange(version) {
+ return version, nil
+ }
+ return version, ErrVersionNotSupported
+}
+
+// SupportedVersionWithDefaults validates that the version provided by the client is supported by the server
+// minimal API version <= client path version <= maximum API version focused on the endpoint tree from URL
+func SupportedVersionWithDefaults(r *http.Request) (semver.Version, error) {
+ tree := CompatTree
+ if IsLibpodRequest(r) {
+ tree = LibpodTree
+ }
+
+ return SupportedVersion(r,
+ fmt.Sprintf(">=%s <=%s", ApiVersion[tree][MinimalApiVersion].String(),
+ ApiVersion[tree][CurrentApiVersion].String()))
+}
+
// WriteResponse encodes the given value as JSON or string and renders it for http client
func WriteResponse(w http.ResponseWriter, code int, value interface{}) {
// RFC2616 explicitly states that the following status codes "MUST NOT
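
As a usage sketch of the new version helpers, a hypothetical handler could guard itself as below. The handler and package names are illustrative; only SupportedVersionWithDefaults, ErrVersionNotGiven, Error, and WriteResponse from this utils package are assumed.

package handlers

import (
	"net/http"

	"github.com/containers/libpod/pkg/api/handlers/utils"
)

// versionGuardedHandler is a hypothetical endpoint showing the intended use
// of SupportedVersionWithDefaults before doing any real work.
func versionGuardedHandler(w http.ResponseWriter, r *http.Request) {
	if _, err := utils.SupportedVersionWithDefaults(r); err != nil {
		// Compat endpoints may omit the version entirely; only reject
		// requests that named a version outside the supported range.
		if err != utils.ErrVersionNotGiven {
			utils.Error(w, "unsupported API version", http.StatusBadRequest, err)
			return
		}
	}
	utils.WriteResponse(w, http.StatusOK, "OK")
}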
diff --git a/pkg/api/handlers/utils/handler_test.go b/pkg/api/handlers/utils/handler_test.go
new file mode 100644
index 000000000..6009432b5
--- /dev/null
+++ b/pkg/api/handlers/utils/handler_test.go
@@ -0,0 +1,139 @@
+package utils
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/gorilla/mux"
+)
+
+func TestSupportedVersion(t *testing.T) {
+ req, err := http.NewRequest("GET",
+ fmt.Sprintf("/v%s/libpod/testing/versions", ApiVersion[LibpodTree][CurrentApiVersion]),
+ nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req = mux.SetURLVars(req, map[string]string{"version": ApiVersion[LibpodTree][CurrentApiVersion].String()})
+
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := SupportedVersionWithDefaults(r)
+ switch {
+ case errors.Is(err, ErrVersionNotGiven): // for compat endpoints version optional
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ case errors.Is(err, ErrVersionNotSupported): // version given but not supported
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprint(w, err.Error())
+ case err != nil:
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ default: // all good
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, "OK")
+ }
+ })
+ handler.ServeHTTP(rr, req)
+
+ if status := rr.Code; status != http.StatusOK {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusOK)
+ }
+
+ // Check the response body is what we expect.
+ expected := `OK`
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %q want %q",
+ rr.Body.String(), expected)
+ }
+}
+
+func TestUnsupportedVersion(t *testing.T) {
+ version := "999.999.999"
+ req, err := http.NewRequest("GET",
+ fmt.Sprintf("/v%s/libpod/testing/versions", version),
+ nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req = mux.SetURLVars(req, map[string]string{"version": version})
+
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := SupportedVersionWithDefaults(r)
+ switch {
+ case errors.Is(err, ErrVersionNotGiven): // for compat endpoints version optional
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ case errors.Is(err, ErrVersionNotSupported): // version given but not supported
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprint(w, err.Error())
+ case err != nil:
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ default: // all good
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, "OK")
+ }
+ })
+ handler.ServeHTTP(rr, req)
+
+ if status := rr.Code; status != http.StatusBadRequest {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusBadRequest)
+ }
+
+ // Check the response body is what we expect.
+ expected := ErrVersionNotSupported.Error()
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %q want %q",
+ rr.Body.String(), expected)
+ }
+}
+
+func TestEqualVersion(t *testing.T) {
+ version := "1.30.0"
+ req, err := http.NewRequest("GET",
+ fmt.Sprintf("/v%s/libpod/testing/versions", version),
+ nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req = mux.SetURLVars(req, map[string]string{"version": version})
+
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := SupportedVersion(r, "=="+version)
+ switch {
+ case errors.Is(err, ErrVersionNotGiven): // for compat endpoints version optional
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ case errors.Is(err, ErrVersionNotSupported): // version given but not supported
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprint(w, err.Error())
+ case err != nil:
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, err.Error())
+ default: // all good
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, "OK")
+ }
+ })
+ handler.ServeHTTP(rr, req)
+
+ if status := rr.Code; status != http.StatusOK {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusOK)
+ }
+
+ // Check the response body is what we expect.
+ expected := http.StatusText(http.StatusOK)
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %q want %q",
+ rr.Body.String(), expected)
+ }
+}
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 1c67de9db..7fb31a177 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -62,7 +62,6 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
}{
// This is where you can override the golang default value for one of fields
}
- // TODO I think all is implemented with a filter?
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
return nil, err
@@ -71,6 +70,10 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
if _, found := r.URL.Query()["digests"]; found && query.Digests {
UnSupportedParameter("digests")
}
+ var (
+ images []*image.Image
+ err error
+ )
if len(query.Filters) > 0 {
for k, v := range query.Filters {
@@ -78,11 +81,33 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
filters = append(filters, fmt.Sprintf("%s=%s", k, val))
}
}
- return runtime.ImageRuntime().GetImagesWithFilters(filters)
+ images, err = runtime.ImageRuntime().GetImagesWithFilters(filters)
+ if err != nil {
+ return images, err
+ }
} else {
- return runtime.ImageRuntime().GetImages()
+ images, err = runtime.ImageRuntime().GetImages()
+ if err != nil {
+ return images, err
+ }
}
-
+ if query.All {
+ return images, nil
+ }
+ var returnImages []*image.Image
+ for _, img := range images {
+ if len(img.Names()) == 0 {
+ parent, err := img.IsParent(r.Context())
+ if err != nil {
+ return nil, err
+ }
+ if parent {
+ continue
+ }
+ }
+ returnImages = append(returnImages, img)
+ }
+ return returnImages, nil
}
func GetImage(r *http.Request, name string) (*image.Image, error) {
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index 378d1e06c..0d78e4cdb 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -584,9 +584,9 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/resize"), s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/resize"), s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
// Added non version path to URI to support docker non versioned paths
- r.HandleFunc("/containers/{name}/resize", s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
+ r.HandleFunc("/containers/{name}/resize", s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
// swagger:operation GET /containers/{name}/export compat exportContainer
// ---
// tags:
@@ -1259,7 +1259,7 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
// swagger:operation GET /libpod/containers/{name}/export libpod libpodExportContainer
// ---
// tags:
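
The renamed compat.ResizeTTY handler now backs both the compat and libpod resize routes. A minimal sketch of driving it through the ResizeContainerTTY binding added later in this change; the socket URI, container name, and dimensions are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/containers"
)

func main() {
	// Assumed local service socket; adjust for your environment.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	rows, cols := 40, 120 // desired TTY dimensions in characters
	if err := containers.ResizeContainerTTY(ctx, "mycontainer", &rows, &cols); err != nil {
		log.Fatal(err)
	}
}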
diff --git a/pkg/api/server/register_exec.go b/pkg/api/server/register_exec.go
index 71fb50307..1533edba9 100644
--- a/pkg/api/server/register_exec.go
+++ b/pkg/api/server/register_exec.go
@@ -97,10 +97,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// properties:
// Detach:
// type: boolean
- // description: Detach from the command
+ // description: Detach from the command. Not presently supported.
// Tty:
// type: boolean
- // description: Allocate a pseudo-TTY
+ // description: Allocate a pseudo-TTY. Presently ignored.
// produces:
// - application/json
// responses:
@@ -109,12 +109,12 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// 404:
// $ref: "#/responses/NoSuchExecInstance"
// 409:
- // description: container is stopped or paused
+ // description: container is not running
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost)
// Added non version path to URI to support docker non versioned paths
- r.Handle("/exec/{id}/start", s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost)
+ r.Handle("/exec/{id}/start", s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost)
// swagger:operation POST /exec/{id}/resize compat resizeExec
// ---
// tags:
@@ -145,15 +145,15 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchExecInstance"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/exec/{id}/resize"), s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/exec/{id}/resize"), s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
// Added non version path to URI to support docker non versioned paths
- r.Handle("/exec/{id}/resize", s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost)
+ r.Handle("/exec/{id}/resize", s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
// swagger:operation GET /exec/{id}/json compat inspectExec
// ---
// tags:
// - exec (compat)
// summary: Inspect an exec instance
- // description: Return low-level information about an exec instance.
+ // description: Return low-level information about an exec instance. Stale (stopped) exec sessions will be auto-removed after inspect runs.
// parameters:
// - in: path
// name: id
@@ -264,10 +264,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// properties:
// Detach:
// type: boolean
- // description: Detach from the command
+ // description: Detach from the command. Not presently supported.
// Tty:
// type: boolean
- // description: Allocate a pseudo-TTY
+ // description: Allocate a pseudo-TTY. Presently ignored.
// produces:
// - application/json
// responses:
@@ -276,10 +276,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// 404:
// $ref: "#/responses/NoSuchExecInstance"
// 409:
- // description: container is stopped or paused
+ // description: container is not running.
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/exec/{id}/start"), s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/exec/{id}/start"), s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost)
// swagger:operation POST /libpod/exec/{id}/resize libpod libpodResizeExec
// ---
// tags:
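
The now-wired /exec/{id}/start routes accept the small ExecStartConfig body added to pkg/api/handlers. A sketch of composing that payload; the field values are placeholders, and per the swagger notes above, Detach and Tty are not yet honored by the handler.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/libpod/pkg/api/handlers"
)

func main() {
	// Build the request body for POST /exec/{id}/start.
	body, err := json.Marshal(handlers.ExecStartConfig{
		Detach: false, // not presently supported by the handler
		Tty:    false, // presently ignored by the handler
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"Detach":false,"Tty":false}
}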
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 0e8d68b7e..01854b9c4 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -698,7 +698,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// responses:
// 200:
// $ref: '#/responses/LibpodImageTreeResponse'
- // 401:
+ // 404:
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
@@ -854,7 +854,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// 500:
// $ref: '#/responses/InternalError'
r.Handle(VersionedPath("/libpod/images/remove"), s.APIHandler(libpod.ImagesBatchRemove)).Methods(http.MethodDelete)
- // swagger:operation DELETE /libpod/images/{name:.*}/remove libpod libpodRemoveImage
+ // swagger:operation DELETE /libpod/images/{name:.*} libpod libpodRemoveImage
// ---
// tags:
// - images
@@ -883,7 +883,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/ConflictError'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name:.*}/remove"), s.APIHandler(libpod.ImagesRemove)).Methods(http.MethodDelete)
+ r.Handle(VersionedPath("/libpod/images/{name:.*}"), s.APIHandler(libpod.ImagesRemove)).Methods(http.MethodDelete)
// swagger:operation POST /libpod/images/pull libpod libpodImagesPull
// ---
// tags:
diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go
new file mode 100644
index 000000000..b1189c1f4
--- /dev/null
+++ b/pkg/api/server/register_networks.go
@@ -0,0 +1,100 @@
+package server
+
+import (
+ "net/http"
+
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
+ "github.com/gorilla/mux"
+)
+
+func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
+ // swagger:operation DELETE /libpod/networks/{name} libpod libpodRemoveNetwork
+ // ---
+ // tags:
+ // - networks
+ // summary: Remove a network
+ // description: Remove a CNI configured network
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name of the network
+ // - in: query
+ // name: force
+ // type: boolean
+ // description: remove containers associated with network
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/NetworkRmReport"
+ // 404:
+ // $ref: "#/responses/NoSuchNetwork"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/networks/{name}"), s.APIHandler(libpod.RemoveNetwork)).Methods(http.MethodDelete)
+ // swagger:operation GET /libpod/networks/{name}/json libpod libpodInspectNetwork
+ // ---
+ // tags:
+ // - networks
+ // summary: Inspect a network
+ // description: Display low level configuration for a CNI network
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name of the network
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/NetworkInspectReport"
+ // 404:
+ // $ref: "#/responses/NoSuchNetwork"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/networks/{name}/json"), s.APIHandler(libpod.InspectNetwork)).Methods(http.MethodGet)
+ // swagger:operation GET /libpod/networks/json libpod libpodListNetwork
+ // ---
+ // tags:
+ // - networks
+ // summary: List networks
+ // description: Display summary of network configurations
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/NetworkListReport"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/networks/json"), s.APIHandler(libpod.ListNetworks)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/networks/create libpod libpodCreateNetwork
+ // ---
+ // tags:
+ // - networks
+ // summary: Create network
+ // description: Create a new CNI network configuration
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: query
+ // name: name
+ // type: string
+ // description: optional name for new network
+ // - in: body
+ // name: create
+ // description: attributes for creating a network
+ // schema:
+ // $ref: "#/definitions/NetworkCreateOptions"
+ // responses:
+ // 200:
+ // $ref: "#/responses/NetworkCreateReport"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/networks/create"), s.APIHandler(libpod.CreateNetwork)).Methods(http.MethodPost)
+ return nil
+}
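
A minimal sketch of exercising the new create route through the network bindings added later in this diff. The socket URI and network name are placeholders, and the options struct is left at its zero value since its fields are not shown here.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/network"
	"github.com/containers/libpod/pkg/domain/entities"
)

func main() {
	// Assumed local service socket; adjust for your environment.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	name := "examplenet"
	report, err := network.Create(ctx, entities.NetworkCreateOptions{}, &name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", report)
}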
diff --git a/pkg/api/server/register_system.go b/pkg/api/server/register_system.go
index 7375a75c1..8a942a888 100644
--- a/pkg/api/server/register_system.go
+++ b/pkg/api/server/register_system.go
@@ -12,7 +12,7 @@ func (s *APIServer) registerSystemHandlers(r *mux.Router) error {
r.Handle(VersionedPath("/system/df"), s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet)
// Added non version path to URI to support docker non versioned paths
r.Handle("/system/df", s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet)
- // Swagger:operation POST /libpod/system/prune libpod pruneSystem
+ // swagger:operation POST /libpod/system/prune libpod pruneSystem
// ---
// tags:
// - system
@@ -27,6 +27,33 @@ func (s *APIServer) registerSystemHandlers(r *mux.Router) error {
// 500:
// $ref: "#/responses/InternalError"
r.Handle(VersionedPath("/libpod/system/prune"), s.APIHandler(libpod.SystemPrune)).Methods(http.MethodPost)
-
+ // swagger:operation POST /libpod/system/reset libpod resetSystem
+ // ---
+ // tags:
+ // - system
+ // summary: Reset podman storage
+ // description: All containers will be stopped and removed, and all images, volumes and container content will be removed.
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.Handle(VersionedPath("/libpod/system/reset"), s.APIHandler(libpod.SystemReset)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/system/df libpod df
+ // ---
+ // tags:
+ // - system
+ // summary: Show disk usage
+ // description: Return information about disk usage for containers, images, and volumes
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: '#/responses/SystemDiskUse'
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.Handle(VersionedPath("/libpod/system/df"), s.APIHandler(libpod.DiskUsage)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index a6c5d8e1e..d39528f45 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -104,6 +104,7 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
server.registerInfoHandlers,
server.registerManifestHandlers,
server.registerMonitorHandlers,
+ server.registerNetworkHandlers,
server.registerPingHandlers,
server.registerPlayHandlers,
server.registerPluginsHandlers,
diff --git a/pkg/api/server/swagger.go b/pkg/api/server/swagger.go
index e47f2cc2f..ebd99ba27 100644
--- a/pkg/api/server/swagger.go
+++ b/pkg/api/server/swagger.go
@@ -24,6 +24,15 @@ type swagErrNoSuchContainer struct {
}
}
+// No such network
+// swagger:response NoSuchNetwork
+type swagErrNoSuchNetwork struct {
+ // in:body
+ Body struct {
+ entities.ErrorModel
+ }
+}
+
// No such exec instance
// swagger:response NoSuchExecInstance
type swagErrNoSuchExecInstance struct {
@@ -190,3 +199,21 @@ type swagVersion struct {
entities.SystemVersionReport
}
}
+
+// Disk usage
+// swagger:response SystemDiskUse
+type swagDiskUseResponse struct {
+ // in:body
+ Body struct {
+ entities.SystemDfReport
+ }
+}
+
+// Prune report
+// swagger:response SystemPruneReport
+type swagSystemPruneReport struct {
+ // in:body
+ Body struct {
+ entities.SystemPruneReport
+ }
+}
diff --git a/pkg/api/tags.yaml b/pkg/api/tags.yaml
index 5b5d9f5bb..1ffb5e940 100644
--- a/pkg/api/tags.yaml
+++ b/pkg/api/tags.yaml
@@ -5,9 +5,11 @@ tags:
description: Actions related to exec
- name: images
description: Actions related to images
- - name: pods
- description: Actions related to manifests
- name: manifests
+ description: Actions related to manifests
+ - name: networks
+ description: Actions related to networks
+ - name: pods
description: Actions related to pods
- name: volumes
description: Actions related to volumes
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 78d5ac474..eca5c342c 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -23,6 +23,10 @@ import (
// container labels.
const Label = "io.containers.autoupdate"
+// AuthfileLabel denotes the container label key to specify the authfile in
+// container labels.
+const AuthfileLabel = "io.containers.autoupdate.authfile"
+
// Policy represents an auto-update policy.
type Policy string
@@ -63,6 +67,12 @@ func LookupPolicy(s string) (Policy, error) {
return "", errors.Errorf("invalid auto-update policy %q: valid policies are %+q", s, keys)
}
+// Options include parameters for auto updates.
+type Options struct {
+ // Authfile to use when contacting registries.
+ Authfile string
+}
+
// ValidateImageReference checks if the specified imageName is a fully-qualified
// image reference to the docker transport (without digest). Such a reference
// includes a domain, name and tag (e.g., quay.io/podman/stable:latest). The
@@ -96,7 +106,7 @@ func ValidateImageReference(imageName string) error {
//
// It returns a slice of successfully restarted systemd units and a slice of
// errors encountered during auto update.
-func AutoUpdate(runtime *libpod.Runtime) ([]string, []error) {
+func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
// Create a map from `image ID -> []*Container`.
containerMap, errs := imageContainersMap(runtime)
if len(containerMap) == 0 {
@@ -138,7 +148,12 @@ func AutoUpdate(runtime *libpod.Runtime) ([]string, []error) {
if rawImageName == "" {
errs = append(errs, errors.Errorf("error auto-updating container %q: raw-image name is empty", ctr.ID()))
}
- needsUpdate, err := newerImageAvailable(runtime, image, rawImageName)
+ labels := ctr.Labels()
+ authFilePath, exists := labels[AuthfileLabel]
+ if exists {
+ options.Authfile = authFilePath
+ }
+ needsUpdate, err := newerImageAvailable(runtime, image, rawImageName, options)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: image check for %q failed", ctr.ID(), rawImageName))
continue
@@ -148,7 +163,7 @@ func AutoUpdate(runtime *libpod.Runtime) ([]string, []error) {
}
logrus.Infof("Auto-updating container %q using image %q", ctr.ID(), rawImageName)
if _, updated := updatedRawImages[rawImageName]; !updated {
- _, err = updateImage(runtime, rawImageName)
+ _, err = updateImage(runtime, rawImageName, options)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: image update for %q failed", ctr.ID(), rawImageName))
continue
@@ -230,13 +245,15 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string][]*libpod.Container
// newerImageAvailable returns true if the corresponding image on the remote
// registry is newer.
-func newerImageAvailable(runtime *libpod.Runtime, img *image.Image, origName string) (bool, error) {
+func newerImageAvailable(runtime *libpod.Runtime, img *image.Image, origName string, options Options) (bool, error) {
remoteRef, err := docker.ParseReference("//" + origName)
if err != nil {
return false, err
}
- remoteImg, err := remoteRef.NewImage(context.Background(), runtime.SystemContext())
+ sys := runtime.SystemContext()
+ sys.AuthFilePath = options.Authfile
+ remoteImg, err := remoteRef.NewImage(context.Background(), sys)
if err != nil {
return false, err
}
@@ -255,25 +272,22 @@ func newerImageAvailable(runtime *libpod.Runtime, img *image.Image, origName str
}
// updateImage pulls the specified image.
-func updateImage(runtime *libpod.Runtime, name string) (*image.Image, error) {
+func updateImage(runtime *libpod.Runtime, name string, options Options) (*image.Image, error) {
sys := runtime.SystemContext()
registryOpts := image.DockerRegistryOptions{}
signaturePolicyPath := ""
- authFilePath := ""
if sys != nil {
registryOpts.OSChoice = sys.OSChoice
registryOpts.ArchitectureChoice = sys.OSChoice
registryOpts.DockerCertPath = sys.DockerCertPath
-
signaturePolicyPath = sys.SignaturePolicyPath
- authFilePath = sys.AuthFilePath
}
newImage, err := runtime.ImageRuntime().New(context.Background(),
docker.Transport.Name()+"://"+name,
signaturePolicyPath,
- authFilePath,
+ options.Authfile,
os.Stderr,
&registryOpts,
image.SigningOptions{},
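
With AuthfileLabel in place, a per-container label overrides whatever authfile the caller passes in Options. A hypothetical sketch of invoking the updated entry point; obtaining the *libpod.Runtime and the authfile path are assumptions left to the caller.

package example

import (
	"log"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/autoupdate"
)

// runAutoUpdate is a hypothetical helper; constructing the runtime (e.g. via
// libpod.NewRuntime) is up to the caller.
func runAutoUpdate(runtime *libpod.Runtime) {
	opts := autoupdate.Options{
		// Default authfile; a container carrying AuthfileLabel overrides this.
		Authfile: "/run/user/1000/containers/auth.json",
	}
	units, errs := autoupdate.AutoUpdate(runtime, opts)
	for _, unit := range units {
		log.Printf("restarted systemd unit %s", unit)
	}
	for _, err := range errs {
		log.Printf("auto-update error: %v", err)
	}
}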
diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go
index 4b07847d1..7e2a444bd 100644
--- a/pkg/bindings/bindings.go
+++ b/pkg/bindings/bindings.go
@@ -8,11 +8,20 @@
package bindings
+import (
+ "github.com/blang/semver"
+)
+
var (
// PTrue is a convenience variable that can be used in bindings where
// a pointer to a bool (optional parameter) is required.
- PTrue bool = true
+ pTrue = true
+ PTrue = &pTrue
// PFalse is a convenience variable that can be used in bindings where
// a pointer to a bool (optional parameter) is required.
- PFalse bool = false
+ pFalse = false
+ PFalse = &pFalse
+
+ // APIVersion is the Libpod API version implemented by this client; podman will fail to run if this value is wrong
+ APIVersion = semver.MustParse("1.0.0")
)
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index da3755fc8..d21d55beb 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -15,6 +15,7 @@ import (
"strings"
"time"
+ "github.com/blang/semver"
"github.com/containers/libpod/pkg/api/types"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
@@ -39,6 +40,7 @@ type APIResponse struct {
type Connection struct {
_url *url.URL
client *http.Client
+ conn *net.Conn
}
type valueKey string
@@ -88,26 +90,26 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context
}
// Now we setup the http client to use the connection above
- var client *http.Client
+ var connection Connection
switch _url.Scheme {
case "ssh":
secure, err = strconv.ParseBool(_url.Query().Get("secure"))
if err != nil {
secure = false
}
- client, err = sshClient(_url, identity[0], secure)
+ connection, err = sshClient(_url, identity[0], secure)
case "unix":
if !strings.HasPrefix(uri, "unix:///") {
// autofix unix://path_element vs unix:///path_element
_url.Path = JoinURL(_url.Host, _url.Path)
_url.Host = ""
}
- client, err = unixClient(_url)
+ connection, err = unixClient(_url)
case "tcp":
if !strings.HasPrefix(uri, "tcp://") {
return nil, errors.New("tcp URIs should begin with tcp://")
}
- client, err = tcpClient(_url)
+ connection, err = tcpClient(_url)
default:
return nil, errors.Errorf("'%s' is not a supported schema", _url.Scheme)
}
@@ -115,26 +117,34 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context
return nil, errors.Wrapf(err, "Failed to create %sClient", _url.Scheme)
}
- ctx = context.WithValue(ctx, clientKey, &Connection{_url, client})
+ ctx = context.WithValue(ctx, clientKey, &connection)
if err := pingNewConnection(ctx); err != nil {
return nil, err
}
return ctx, nil
}
-func tcpClient(_url *url.URL) (*http.Client, error) {
- return &http.Client{
+func tcpClient(_url *url.URL) (Connection, error) {
+ connection := Connection{
+ _url: _url,
+ }
+ connection.client = &http.Client{
Transport: &http.Transport{
- DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
- return net.Dial("tcp", _url.Host)
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ conn, err := net.Dial("tcp", _url.Host)
+ if c, ok := ctx.Value(clientKey).(*Connection); ok {
+ c.conn = &conn
+ }
+ return conn, err
},
DisableCompression: true,
},
- }, nil
+ }
+ return connection, nil
}
// pingNewConnection pings to make sure the RESTFUL service is up
-// and running. it should only be used where initializing a connection
+// and running. it should only be used when initializing a connection
func pingNewConnection(ctx context.Context) error {
client, err := GetClient(ctx)
if err != nil {
@@ -145,16 +155,28 @@ func pingNewConnection(ctx context.Context) error {
if err != nil {
return err
}
+
if response.StatusCode == http.StatusOK {
- return nil
+ v, err := semver.ParseTolerant(response.Header.Get("Libpod-API-Version"))
+ if err != nil {
+ return err
+ }
+
+ switch APIVersion.Compare(v) {
+ case 1, 0:
+ // Server's job when client version is equal or older
+ return nil
+ case -1:
+ return errors.Errorf("server API version is too old. client %q server %q", APIVersion.String(), v.String())
+ }
}
return errors.Errorf("ping response was %q", response.StatusCode)
}
-func sshClient(_url *url.URL, identity string, secure bool) (*http.Client, error) {
+func sshClient(_url *url.URL, identity string, secure bool) (Connection, error) {
auth, err := publicKey(identity)
if err != nil {
- return nil, errors.Wrapf(err, "Failed to parse identity %s: %v\n", _url.String(), identity)
+ return Connection{}, errors.Wrapf(err, "Failed to parse identity %s: %v\n", _url.String(), identity)
}
callback := ssh.InsecureIgnoreHostKey()
@@ -188,26 +210,39 @@ func sshClient(_url *url.URL, identity string, secure bool) (*http.Client, error
},
)
if err != nil {
- return nil, errors.Wrapf(err, "Connection to bastion host (%s) failed.", _url.String())
+ return Connection{}, errors.Wrapf(err, "Connection to bastion host (%s) failed.", _url.String())
}
- return &http.Client{
+
+ connection := Connection{_url: _url}
+ connection.client = &http.Client{
Transport: &http.Transport{
- DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
- return bastion.Dial("unix", _url.Path)
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ conn, err := bastion.Dial("unix", _url.Path)
+ if c, ok := ctx.Value(clientKey).(*Connection); ok {
+ c.conn = &conn
+ }
+ return conn, err
},
- }}, nil
+ }}
+ return connection, nil
}
-func unixClient(_url *url.URL) (*http.Client, error) {
- return &http.Client{
+func unixClient(_url *url.URL) (Connection, error) {
+ connection := Connection{_url: _url}
+ connection.client = &http.Client{
Transport: &http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
d := net.Dialer{}
- return d.DialContext(ctx, "unix", _url.Path)
+ conn, err := d.DialContext(ctx, "unix", _url.Path)
+ if c, ok := ctx.Value(clientKey).(*Connection); ok {
+ c.conn = &conn
+ }
+ return conn, err
},
DisableCompression: true,
},
- }, nil
+ }
+ return connection, nil
}
// DoRequest assembles the http request and returns the response
@@ -232,6 +267,7 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string,
if len(queryParams) > 0 {
req.URL.RawQuery = queryParams.Encode()
}
+ req = req.WithContext(context.WithValue(context.Background(), clientKey, c))
// Give the Do three chances in the case of a comm/service hiccup
for i := 0; i < 3; i++ {
response, err = c.client.Do(req) // nolint
@@ -243,6 +279,10 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string,
return &APIResponse{response, req}, err
}
+func (c *Connection) Write(b []byte) (int, error) {
+ return (*c.conn).Write(b)
+}
+
// FiltersToString converts our typical filter format of a
// map[string][]string to a query/html safe string.
func FiltersToString(filters map[string][]string) (string, error) {
@@ -295,8 +335,8 @@ func publicKey(path string) (ssh.AuthMethod, error) {
func hostKey(host string) ssh.PublicKey {
// parse OpenSSH known_hosts file
// ssh or use ssh-keyscan to get initial key
- known_hosts := filepath.Join(homedir.HomeDir(), ".ssh", "known_hosts")
- fd, err := os.Open(known_hosts)
+ knownHosts := filepath.Join(homedir.HomeDir(), ".ssh", "known_hosts")
+ fd, err := os.Open(knownHosts)
if err != nil {
logrus.Error(err)
return nil
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index e74a256c7..1ed4919e0 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -2,9 +2,13 @@ package containers
import (
"context"
+ "encoding/binary"
+ "fmt"
"io"
"net/http"
"net/url"
+ "os"
+ "os/signal"
"strconv"
"strings"
@@ -12,7 +16,14 @@ import (
"github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/domain/entities"
+ sig "github.com/containers/libpod/pkg/signal"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+var (
+ ErrLostSync = errors.New("lost synchronization with attach multiplexed result")
)
// List obtains a list of containers in local storage. All parameters to this method are optional.
@@ -247,7 +258,7 @@ func Unpause(ctx context.Context, nameOrID string) error {
// Wait blocks until the given container reaches a condition. If not provided, the condition will
// default to stopped. If the condition is stopped, an exit code for the container will be provided. The
// nameOrID can be a container name or a partial/full ID.
-func Wait(ctx context.Context, nameOrID string, condition *define.ContainerStatus) (int32, error) { //nolint
+func Wait(ctx context.Context, nameOrID string, condition *define.ContainerStatus) (int32, error) { // nolint
var exitCode int32
conn, err := bindings.GetClient(ctx)
if err != nil {
@@ -333,3 +344,233 @@ func ContainerInit(ctx context.Context, nameOrID string) error {
}
return response.Process(nil)
}
+
+// Attach attaches to a running container
+func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stream *bool, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool) error {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Do we need to wire in stdin?
+ ctnr, err := Inspect(ctx, nameOrId, bindings.PFalse)
+ if err != nil {
+ return err
+ }
+
+ params := url.Values{}
+ if detachKeys != nil {
+ params.Add("detachKeys", *detachKeys)
+ }
+ if logs != nil {
+ params.Add("logs", fmt.Sprintf("%t", *logs))
+ }
+ if stream != nil {
+ params.Add("stream", fmt.Sprintf("%t", *stream))
+ }
+ if stdin != nil {
+ params.Add("stdin", "true")
+ }
+ if stdout != nil {
+ params.Add("stdout", "true")
+ }
+ if stderr != nil {
+ params.Add("stderr", "true")
+ }
+
+ // Unless all requirements are met, don't treat "stdin" as a terminal
+ file, ok := stdin.(*os.File)
+ needTTY := ok && terminal.IsTerminal(int(file.Fd())) && ctnr.Config.Tty
+ if needTTY {
+ state, err := terminal.MakeRaw(int(file.Fd()))
+ if err != nil {
+ return err
+ }
+
+ logrus.SetFormatter(&rawFormatter{})
+
+ defer func() {
+ if err := terminal.Restore(int(file.Fd()), state); err != nil {
+ logrus.Errorf("unable to restore terminal: %q", err)
+ }
+ logrus.SetFormatter(&logrus.TextFormatter{})
+ }()
+
+ winChange := make(chan os.Signal, 1)
+ signal.Notify(winChange, sig.SIGWINCH)
+ winCtx, winCancel := context.WithCancel(ctx)
+ defer winCancel()
+
+ go func() {
+ // Prime the pump, we need one reset to ensure everything is ready
+ winChange <- sig.SIGWINCH
+ for {
+ select {
+ case <-winCtx.Done():
+ return
+ case <-winChange:
+ h, w, err := terminal.GetSize(int(file.Fd()))
+ if err != nil {
+ logrus.Warnf("failed to obtain TTY size: " + err.Error())
+ }
+
+ if err := ResizeContainerTTY(ctx, nameOrId, &h, &w); err != nil {
+ logrus.Warnf("failed to resize TTY: " + err.Error())
+ }
+ }
+ }
+ }()
+ }
+
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/attach", params, nameOrId)
+ if err != nil {
+ return err
+ }
+ defer response.Body.Close()
+ // If we are attaching around a start, we need to "signal"
+ // back that we are in fact attached so that start does
+ // not execute before we can attach.
+ if attachReady != nil {
+ attachReady <- true
+ }
+ if !(response.IsSuccess() || response.IsInformational()) {
+ return response.Process(nil)
+ }
+
+ if stdin != nil {
+ go func() {
+ _, err := io.Copy(conn, stdin)
+ if err != nil {
+ logrus.Error("failed to write input to service: " + err.Error())
+ }
+ }()
+ }
+
+ buffer := make([]byte, 1024)
+ if ctnr.Config.Tty {
+ // If not multiplexed, read from server and write to stdout
+ _, err := io.Copy(stdout, response.Body)
+ if err != nil {
+ return err
+ }
+ } else {
+ for {
+ // Read multiplexed channels and write to appropriate stream
+ fd, l, err := DemuxHeader(response.Body, buffer)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return err
+ }
+ frame, err := DemuxFrame(response.Body, buffer, l)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case fd == 0 && stdin != nil:
+ _, err := stdout.Write(frame[0:l])
+ if err != nil {
+ return err
+ }
+ case fd == 1 && stdout != nil:
+ _, err := stdout.Write(frame[0:l])
+ if err != nil {
+ return err
+ }
+ case fd == 2 && stderr != nil:
+ _, err := stderr.Write(frame[0:l])
+ if err != nil {
+ return err
+ }
+ case fd == 3:
+ return fmt.Errorf("error from daemon in stream: %s", frame)
+ default:
+ return fmt.Errorf("unrecognized input header: %d", fd)
+ }
+ }
+ }
+ return nil
+}
+
+// DemuxHeader reads the header of a frame from the server-multiplexed stdin/stdout/stderr/2nd error channel
+func DemuxHeader(r io.Reader, buffer []byte) (fd, sz int, err error) {
+ n, err := io.ReadFull(r, buffer[0:8])
+ if err != nil {
+ return
+ }
+ if n < 8 {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+
+ fd = int(buffer[0])
+ if fd < 0 || fd > 3 {
+ err = ErrLostSync
+ return
+ }
+
+ sz = int(binary.BigEndian.Uint32(buffer[4:8]))
+ return
+}
+
+// DemuxFrame reads the contents of a frame from the server-multiplexed stdin/stdout/stderr/2nd error channel
+func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error) {
+ if len(buffer) < length {
+ buffer = append(buffer, make([]byte, length-len(buffer)+1)...)
+ }
+ n, err := io.ReadFull(r, buffer[0:length])
+ if err != nil {
+ return nil, err
+ }
+ if n < length {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+
+ return buffer[0:length], nil
+}
+
+// ResizeContainerTTY sets container's TTY height and width in characters
+func ResizeContainerTTY(ctx context.Context, nameOrId string, height *int, width *int) error {
+ return resizeTTY(ctx, bindings.JoinURL("containers", nameOrId, "resize"), height, width)
+}
+
+// ResizeExecTTY sets session's TTY height and width in characters
+func ResizeExecTTY(ctx context.Context, nameOrId string, height *int, width *int) error {
+ return resizeTTY(ctx, bindings.JoinURL("exec", nameOrId, "resize"), height, width)
+}
+
+// resizeTTY sets the TTY size for the container or exec session at the given endpoint
+func resizeTTY(ctx context.Context, endpoint string, height *int, width *int) error {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ params := url.Values{}
+ if height != nil {
+ params.Set("h", strconv.Itoa(*height))
+ }
+ if width != nil {
+ params.Set("w", strconv.Itoa(*width))
+ }
+ rsp, err := conn.DoRequest(nil, http.MethodPost, endpoint, params)
+ if err != nil {
+ return err
+ }
+ return rsp.Process(nil)
+}
+
+type rawFormatter struct {
+ logrus.TextFormatter
+}
+
+func (f *rawFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ buffer, err := f.TextFormatter.Format(entry)
+ if err != nil {
+ return buffer, err
+ }
+ return append(buffer, '\r'), nil
+}
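
A minimal sketch of attaching a local terminal with the new Attach binding. The socket URI and container name are placeholders; the logs/stream flags mirror the bindings test added later in this change, and Attach itself handles raw mode and SIGWINCH-driven resizes when stdin is a TTY.

package main

import (
	"context"
	"log"
	"os"

	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/containers"
)

func main() {
	// Assumed local service socket; adjust for your environment.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Wire the local terminal to the container: no logs replay, stream output.
	err = containers.Attach(ctx, "mycontainer", nil, bindings.PFalse, bindings.PTrue,
		os.Stdin, os.Stdout, os.Stderr, nil)
	if err != nil {
		log.Fatal(err)
	}
}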
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index 034ade618..69b9e9bbf 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -74,8 +74,22 @@ func GetImage(ctx context.Context, nameOrID string, size *bool) (*entities.Image
return &inspectedData, response.Process(&inspectedData)
}
-func Tree(ctx context.Context, nameOrId string) error {
- return bindings.ErrNotImplemented
+// Tree retrieves a "tree" based representation of the given image
+func Tree(ctx context.Context, nameOrId string, whatRequires *bool) (*entities.ImageTreeReport, error) {
+ var report entities.ImageTreeReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ if whatRequires != nil {
+ params.Set("size", strconv.FormatBool(*whatRequires))
+ }
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/tree", params, nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
}
// History returns the parent layers of an image.
@@ -132,7 +146,7 @@ func Export(ctx context.Context, nameOrID string, w io.Writer, format *string, c
_, err = io.Copy(w, response.Body)
return err
}
- return nil
+ return response.Process(nil)
}
// Prune removes unused images from local storage. The optional filters can be used to further
diff --git a/pkg/bindings/images/rm.go b/pkg/bindings/images/rm.go
index e3b5590df..05aa3f9ca 100644
--- a/pkg/bindings/images/rm.go
+++ b/pkg/bindings/images/rm.go
@@ -52,7 +52,7 @@ func Remove(ctx context.Context, nameOrID string, force bool) (*entities.ImageRe
params := url.Values{}
params.Set("force", strconv.FormatBool(force))
- response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s/remove", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s", params, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go
index c95b22953..7bba4f478 100644
--- a/pkg/bindings/network/network.go
+++ b/pkg/bindings/network/network.go
@@ -3,40 +3,76 @@ package network
import (
"context"
"net/http"
+ "net/url"
+ "strconv"
+ "strings"
- "github.com/containernetworking/cni/libcni"
"github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+ jsoniter "github.com/json-iterator/go"
)
-func Create() {}
-func Inspect(ctx context.Context, nameOrID string) (map[string]interface{}, error) {
+// Create makes a new CNI network configuration
+func Create(ctx context.Context, options entities.NetworkCreateOptions, name *string) (*entities.NetworkCreateReport, error) {
+ var report entities.NetworkCreateReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ if name != nil {
+ params.Set("name", *name)
+ }
+ networkConfig, err := jsoniter.MarshalToString(options)
+ if err != nil {
+ return nil, err
+ }
+ stringReader := strings.NewReader(networkConfig)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/networks/create", params)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
+}
+
+// Inspect returns low level information about a CNI network configuration
+func Inspect(ctx context.Context, nameOrID string) ([]entities.NetworkInspectReport, error) {
+ var reports []entities.NetworkInspectReport
conn, err := bindings.GetClient(ctx)
if err != nil {
return nil, err
}
- n := make(map[string]interface{})
response, err := conn.DoRequest(nil, http.MethodGet, "/networks/%s/json", nil, nameOrID)
if err != nil {
- return n, err
+ return nil, err
}
- return n, response.Process(&n)
+ return reports, response.Process(&reports)
}
-func Remove(ctx context.Context, nameOrID string) error {
+// Remove deletes a defined CNI network configuration by name. The optional force boolean
+// will remove all containers associated with the network when set to true. A slice
+// of NetworkRemoveReports are returned.
+func Remove(ctx context.Context, nameOrID string, force *bool) ([]*entities.NetworkRmReport, error) {
+ var reports []*entities.NetworkRmReport
conn, err := bindings.GetClient(ctx)
if err != nil {
- return err
+ return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/networks/%s", nil, nameOrID)
+ params := url.Values{}
+ if force != nil {
+ params.Set("size", strconv.FormatBool(*force))
+ }
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/networks/%s", params, nameOrID)
if err != nil {
- return err
+ return nil, err
}
- return response.Process(nil)
+ return reports, response.Process(&reports)
}
-func List(ctx context.Context) ([]*libcni.NetworkConfigList, error) {
+// List returns a summary of all CNI network configurations
+func List(ctx context.Context) ([]*entities.NetworkListReport, error) {
var (
- netList []*libcni.NetworkConfigList
+ netList []*entities.NetworkListReport
)
conn, err := bindings.GetClient(ctx)
if err != nil {
diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go
index caef6af6f..ed960b8e0 100644
--- a/pkg/bindings/system/system.go
+++ b/pkg/bindings/system/system.go
@@ -112,12 +112,40 @@ func Version(ctx context.Context) (*entities.SystemVersionReport, error) {
f, _ := strconv.ParseFloat(component.APIVersion, 64)
b, _ := time.Parse(time.RFC3339, component.BuildTime)
report.Server = &define.Version{
- RemoteAPIVersion: int64(f),
- Version: component.Version.Version,
- GoVersion: component.GoVersion,
- GitCommit: component.GitCommit,
- Built: b.Unix(),
- OsArch: fmt.Sprintf("%s/%s", component.Os, component.Arch),
+ APIVersion: int64(f),
+ Version: component.Version.Version,
+ GoVersion: component.GoVersion,
+ GitCommit: component.GitCommit,
+ Built: b.Unix(),
+ OsArch: fmt.Sprintf("%s/%s", component.Os, component.Arch),
}
return &report, err
}
+
+// Reset removes all unused system data.
+func Reset(ctx context.Context) error {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+ response, err := conn.DoRequest(nil, http.MethodPost, "/system/reset", nil)
+ if err != nil {
+ return err
+ }
+ return response.Process(response)
+}
+
+// DiskUsage returns information about image, container, and volume disk
+// consumption
+func DiskUsage(ctx context.Context) (*entities.SystemDfReport, error) {
+ var report entities.SystemDfReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ response, err := conn.DoRequest(nil, http.MethodGet, "/system/df", nil)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
+}
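
A minimal sketch of the two new system bindings. The socket URI is a placeholder, and Reset is left commented out because it wipes all containers, images, and volumes.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/system"
)

func main() {
	// Assumed local service socket; adjust for your environment.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Report disk usage for images, containers, and volumes.
	df, err := system.DiskUsage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", df)

	// system.Reset(ctx) would wipe storage back to its default state;
	// left commented out here because it is destructive.
	// _ = system.Reset(ctx)
}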
diff --git a/pkg/bindings/test/attach_test.go b/pkg/bindings/test/attach_test.go
new file mode 100644
index 000000000..6fb166828
--- /dev/null
+++ b/pkg/bindings/test/attach_test.go
@@ -0,0 +1,110 @@
+package test_bindings
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/specgen"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("Podman containers attach", func() {
+ var (
+ bt *bindingTest
+ s *gexec.Session
+ )
+
+ BeforeEach(func() {
+ bt = newBindingTest()
+ bt.RestoreImagesFromCache()
+ s = bt.startAPIService()
+ time.Sleep(1 * time.Second)
+ err := bt.NewConnection()
+ Expect(err).ShouldNot(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ s.Kill()
+ bt.cleanup()
+ })
+
+ It("can run top in container", func() {
+ name := "TopAttachTest"
+ id, err := bt.RunTopContainer(&name, nil, nil)
+ Expect(err).ShouldNot(HaveOccurred())
+
+ tickTock := time.NewTimer(2 * time.Second)
+ go func() {
+ <-tickTock.C
+ timeout := uint(5)
+ err := containers.Stop(bt.conn, id, &timeout)
+ if err != nil {
+ GinkgoWriter.Write([]byte(err.Error()))
+ }
+ }()
+
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ go func() {
+ defer GinkgoRecover()
+
+ err := containers.Attach(bt.conn, id, nil, bindings.PTrue, bindings.PTrue, nil, stdout, stderr, nil)
+ Expect(err).ShouldNot(HaveOccurred())
+ }()
+
+ time.Sleep(5 * time.Second)
+
+ // First character/First line of top output
+ Expect(stdout.String()).Should(ContainSubstring("Mem: "))
+ })
+
+ It("can echo data via cat in container", func() {
+ s := specgen.NewSpecGenerator(alpine.name, false)
+ s.Name = "CatAttachTest"
+ s.Terminal = true
+ s.Command = []string{"/bin/cat"}
+ ctnr, err := containers.CreateWithSpec(bt.conn, s)
+ Expect(err).ShouldNot(HaveOccurred())
+
+ err = containers.Start(bt.conn, ctnr.ID, nil)
+ Expect(err).ShouldNot(HaveOccurred())
+
+ wait := define.ContainerStateRunning
+ _, err = containers.Wait(bt.conn, ctnr.ID, &wait)
+ Expect(err).ShouldNot(HaveOccurred())
+
+ tickTock := time.NewTimer(2 * time.Second)
+ go func() {
+ <-tickTock.C
+ timeout := uint(5)
+ err := containers.Stop(bt.conn, ctnr.ID, &timeout)
+ if err != nil {
+ GinkgoWriter.Write([]byte(err.Error()))
+ }
+ }()
+
+ msg := "Hello, World"
+ stdin := &bytes.Buffer{}
+ stdin.WriteString(msg + "\n")
+
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ go func() {
+ defer GinkgoRecover()
+
+ err := containers.Attach(bt.conn, ctnr.ID, nil, bindings.PFalse, bindings.PTrue, stdin, stdout, stderr, nil)
+ Expect(err).ShouldNot(HaveOccurred())
+ }()
+
+ time.Sleep(5 * time.Second)
+ // Tty==true so we get echo'ed stdin + expected output
+ Expect(stdout.String()).Should(Equal(fmt.Sprintf("%[1]s\r\n%[1]s\r\n", msg)))
+ Expect(stderr.String()).Should(BeEmpty())
+ })
+})
diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go
index f33e42440..a86e6f2e3 100644
--- a/pkg/bindings/test/common_test.go
+++ b/pkg/bindings/test/common_test.go
@@ -191,7 +191,7 @@ func (b *bindingTest) restoreImageFromCache(i testImage) {
func (b *bindingTest) RunTopContainer(containerName *string, insidePod *bool, podName *string) (string, error) {
s := specgen.NewSpecGenerator(alpine.name, false)
s.Terminal = false
- s.Command = []string{"top"}
+ s.Command = []string{"/usr/bin/top"}
if containerName != nil {
s.Name = *containerName
}
diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go
index 328691df2..f725d1cf2 100644
--- a/pkg/bindings/test/containers_test.go
+++ b/pkg/bindings/test/containers_test.go
@@ -56,7 +56,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a running container by name", func() {
// Pausing by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name)
Expect(err).To(BeNil())
@@ -70,7 +70,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a running container by id", func() {
// Pausing by id should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid)
Expect(err).To(BeNil())
@@ -84,7 +84,7 @@ var _ = Describe("Podman containers ", func() {
It("podman unpause a running container by name", func() {
// Unpausing by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name)
Expect(err).To(BeNil())
@@ -100,7 +100,7 @@ var _ = Describe("Podman containers ", func() {
It("podman unpause a running container by ID", func() {
// Unpausing by ID should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Pause by name
err = containers.Pause(bt.conn, name)
@@ -119,7 +119,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a paused container by name", func() {
// Pausing a paused container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name)
Expect(err).To(BeNil())
@@ -132,7 +132,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a paused container by id", func() {
// Pausing a paused container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid)
Expect(err).To(BeNil())
@@ -145,7 +145,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a stopped container by name", func() {
// Pausing a stopped container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -158,7 +158,7 @@ var _ = Describe("Podman containers ", func() {
It("podman pause a stopped container by id", func() {
// Pausing a stopped container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -171,11 +171,11 @@ var _ = Describe("Podman containers ", func() {
It("podman remove a paused container by id without force", func() {
// Removing a paused container without force should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid)
Expect(err).To(BeNil())
- err = containers.Remove(bt.conn, cid, &bindings.PFalse, &bindings.PFalse)
+ err = containers.Remove(bt.conn, cid, bindings.PFalse, bindings.PFalse)
Expect(err).ToNot(BeNil())
code, _ := bindings.CheckResponseCode(err)
Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -192,18 +192,18 @@ var _ = Describe("Podman containers ", func() {
// Removing a paused container with force should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid)
Expect(err).To(BeNil())
- err = containers.Remove(bt.conn, cid, &bindings.PTrue, &bindings.PFalse)
+ err = containers.Remove(bt.conn, cid, bindings.PTrue, bindings.PFalse)
Expect(err).To(BeNil())
})
It("podman stop a paused container by name", func() {
// Stopping a paused container by name should fail
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, name)
Expect(err).To(BeNil())
@@ -216,7 +216,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a paused container by id", func() {
// Stopping a paused container by id should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Pause(bt.conn, cid)
Expect(err).To(BeNil())
@@ -229,7 +229,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a running container by name", func() {
// Stopping a running container by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -243,7 +243,7 @@ var _ = Describe("Podman containers ", func() {
It("podman stop a running container by ID", func() {
// Stopping a running container by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, cid, nil)
Expect(err).To(BeNil())
@@ -302,6 +302,8 @@ var _ = Describe("Podman containers ", func() {
errChan = make(chan error)
go func() {
+ defer GinkgoRecover()
+
_, waitErr := containers.Wait(bt.conn, name, &running)
errChan <- waitErr
close(errChan)
@@ -324,7 +326,7 @@ var _ = Describe("Podman containers ", func() {
// a container that has no healthcheck should be a 409
var name = "top"
- bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ bt.RunTopContainer(&name, bindings.PFalse, nil)
_, err = containers.RunHealthCheck(bt.conn, name)
Expect(err).ToNot(BeNil())
code, _ = bindings.CheckResponseCode(err)
@@ -371,7 +373,7 @@ var _ = Describe("Podman containers ", func() {
_, err = containers.Wait(bt.conn, r.ID, nil)
Expect(err).To(BeNil())
- opts := containers.LogOptions{Stdout: &bindings.PTrue, Follow: &bindings.PTrue}
+ opts := containers.LogOptions{Stdout: bindings.PTrue, Follow: bindings.PTrue}
go func() {
containers.Logs(bt.conn, r.ID, opts, stdoutChan, nil)
}()
@@ -383,7 +385,7 @@ var _ = Describe("Podman containers ", func() {
It("podman top", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// By name
@@ -421,7 +423,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by name", func() {
// Container existence check by name should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, name)
Expect(err).To(BeNil())
@@ -431,7 +433,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by ID", func() {
// Container existence check by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, cid)
Expect(err).To(BeNil())
@@ -441,7 +443,7 @@ var _ = Describe("Podman containers ", func() {
It("podman container exists in local storage by short ID", func() {
// Container existence check by short ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
containerExists, err := containers.Exists(bt.conn, cid[0:12])
Expect(err).To(BeNil())
@@ -459,7 +461,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by name with SIGINT", func() {
// Killing a running container should work
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, name, "SIGINT")
Expect(err).To(BeNil())
@@ -470,7 +472,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by ID with SIGTERM", func() {
// Killing a running container by ID should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, "SIGTERM")
Expect(err).To(BeNil())
@@ -481,7 +483,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by ID with SIGKILL", func() {
// Killing a running container by ID with TERM should work
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, "SIGKILL")
Expect(err).To(BeNil())
@@ -490,7 +492,7 @@ var _ = Describe("Podman containers ", func() {
It("podman kill a running container by bogus signal", func() {
//Killing a running container by bogus signal should fail
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Kill(bt.conn, cid, "foobar")
Expect(err).ToNot(BeNil())
@@ -503,9 +505,9 @@ var _ = Describe("Podman containers ", func() {
var name1 = "first"
var name2 = "second"
var latestContainers = 1
- _, err := bt.RunTopContainer(&name1, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name1, bindings.PFalse, nil)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
Expect(err).To(BeNil())
containerLatestList, err := containers.List(bt.conn, nil, nil, &latestContainers, nil, nil, nil)
Expect(err).To(BeNil())
@@ -534,7 +536,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune stopped containers", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -549,7 +551,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune stopped containers with filters", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -583,7 +585,7 @@ var _ = Describe("Podman containers ", func() {
It("podman prune running containers", func() {
// Start the container.
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Check if the container is running.
@@ -606,7 +608,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect running container", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Inspecting running container should succeed
_, err = containers.Inspect(bt.conn, name, nil)
@@ -615,7 +617,7 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect stopped container", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
@@ -626,20 +628,20 @@ var _ = Describe("Podman containers ", func() {
It("podman inspect running container with size", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
- _, err = containers.Inspect(bt.conn, name, &bindings.PTrue)
+ _, err = containers.Inspect(bt.conn, name, bindings.PTrue)
Expect(err).To(BeNil())
})
It("podman inspect stopped container with size", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Inspecting stopped container with size should succeed
- _, err = containers.Inspect(bt.conn, name, &bindings.PTrue)
+ _, err = containers.Inspect(bt.conn, name, bindings.PTrue)
Expect(err).To(BeNil())
})
@@ -651,7 +653,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, name, nil, nil)
@@ -662,7 +664,7 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
err = containers.Remove(bt.conn, cid, nil, nil)
@@ -673,10 +675,10 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, name, &bindings.PTrue, nil)
+ err = containers.Remove(bt.conn, name, bindings.PTrue, nil)
Expect(err).To(BeNil())
//code, _ := bindings.CheckResponseCode(err)
//Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -684,10 +686,10 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, cid, &bindings.PTrue, nil)
+ err = containers.Remove(bt.conn, cid, bindings.PTrue, nil)
Expect(err).To(BeNil())
//code, _ := bindings.CheckResponseCode(err)
//Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -695,10 +697,10 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container and volume by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, name, nil, &bindings.PTrue)
+ err = containers.Remove(bt.conn, name, nil, bindings.PTrue)
Expect(err).ToNot(BeNil())
code, _ := bindings.CheckResponseCode(err)
Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -706,10 +708,10 @@ var _ = Describe("Podman containers ", func() {
It("podman remove running container and volume by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, cid, nil, &bindings.PTrue)
+ err = containers.Remove(bt.conn, cid, nil, bindings.PTrue)
Expect(err).ToNot(BeNil())
code, _ := bindings.CheckResponseCode(err)
Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -717,10 +719,10 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container and volume by name", func() {
var name = "top"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, name, &bindings.PTrue, &bindings.PTrue)
+ err = containers.Remove(bt.conn, name, bindings.PTrue, bindings.PTrue)
Expect(err).To(BeNil())
//code, _ := bindings.CheckResponseCode(err)
//Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
@@ -728,10 +730,10 @@ var _ = Describe("Podman containers ", func() {
It("podman forcibly remove running container and volume by ID", func() {
var name = "top"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Removing running container should fail
- err = containers.Remove(bt.conn, cid, &bindings.PTrue, &bindings.PTrue)
+ err = containers.Remove(bt.conn, cid, bindings.PTrue, bindings.PTrue)
Expect(err).To(BeNil())
//code, _ := bindings.CheckResponseCode(err)
//Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
diff --git a/pkg/bindings/test/exec_test.go b/pkg/bindings/test/exec_test.go
index 1ef2197b6..53b2dcb4a 100644
--- a/pkg/bindings/test/exec_test.go
+++ b/pkg/bindings/test/exec_test.go
@@ -33,7 +33,7 @@ var _ = Describe("Podman containers exec", func() {
It("Podman exec create makes an exec session", func() {
name := "testCtr"
- cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
execConfig := new(handlers.ExecCreateConfig)
@@ -53,7 +53,7 @@ var _ = Describe("Podman containers exec", func() {
It("Podman exec create with bad command fails", func() {
name := "testCtr"
- _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err := bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
execConfig := new(handlers.ExecCreateConfig)
diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go
index 9c8e82149..f2a1a51e5 100644
--- a/pkg/bindings/test/images_test.go
+++ b/pkg/bindings/test/images_test.go
@@ -76,7 +76,7 @@ var _ = Describe("Podman images", func() {
// Expect(data.Size).To(BeZero())
// Enabling the size parameter should result in size being populated
- data, err = images.GetImage(bt.conn, alpine.name, &bindings.PTrue)
+ data, err = images.GetImage(bt.conn, alpine.name, bindings.PTrue)
Expect(err).To(BeNil())
Expect(data.Size).To(BeNumerically(">", 0))
})
@@ -104,7 +104,7 @@ var _ = Describe("Podman images", func() {
// Start a container with alpine image
var top string = "top"
- _, err = bt.RunTopContainer(&top, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&top, bindings.PFalse, nil)
Expect(err).To(BeNil())
// we should now have a container called "top" running
containerResponse, err := containers.Inspect(bt.conn, "top", nil)
@@ -122,7 +122,7 @@ var _ = Describe("Podman images", func() {
Expect(err).To(BeNil())
// To be extra sure, check if the previously created container
// is gone as well.
- _, err = containers.Inspect(bt.conn, "top", &bindings.PFalse)
+ _, err = containers.Inspect(bt.conn, "top", bindings.PFalse)
code, _ = bindings.CheckResponseCode(err)
Expect(code).To(BeNumerically("==", http.StatusNotFound))
@@ -182,13 +182,13 @@ var _ = Describe("Podman images", func() {
// List images with a filter
filters := make(map[string][]string)
filters["reference"] = []string{alpine.name}
- filteredImages, err := images.List(bt.conn, &bindings.PFalse, filters)
+ filteredImages, err := images.List(bt.conn, bindings.PFalse, filters)
Expect(err).To(BeNil())
Expect(len(filteredImages)).To(BeNumerically("==", 1))
// List images with a bad filter
filters["name"] = []string{alpine.name}
- _, err = images.List(bt.conn, &bindings.PFalse, filters)
+ _, err = images.List(bt.conn, bindings.PFalse, filters)
Expect(err).ToNot(BeNil())
code, _ := bindings.CheckResponseCode(err)
Expect(code).To(BeNumerically("==", http.StatusInternalServerError))
diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go
index 49bbfa246..d8e2a5ef7 100644
--- a/pkg/bindings/test/pods_test.go
+++ b/pkg/bindings/test/pods_test.go
@@ -63,7 +63,7 @@ var _ = Describe("Podman pods", func() {
Expect(err).To(BeNil())
// Adding an alpine container to the existing pod
- _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
Expect(err).To(BeNil())
podSummary, err = pods.List(bt.conn, nil)
// Verify no errors.
@@ -93,7 +93,7 @@ var _ = Describe("Podman pods", func() {
_, err = pods.Start(bt.conn, newpod)
Expect(err).To(BeNil())
- _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
Expect(err).To(BeNil())
// Expected err with invalid filter params
@@ -174,7 +174,7 @@ var _ = Describe("Podman pods", func() {
Expect(code).To(BeNumerically("==", http.StatusNotFound))
// Adding an alpine container to the existing pod
- _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod)
+ _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod)
Expect(err).To(BeNil())
// Binding needs to be modified to inspect the pod state.
diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go
index 62ea32377..fb2df258b 100644
--- a/pkg/bindings/test/system_test.go
+++ b/pkg/bindings/test/system_test.go
@@ -5,6 +5,7 @@ import (
"github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/bindings/images"
"github.com/containers/libpod/pkg/bindings/pods"
"github.com/containers/libpod/pkg/bindings/system"
"github.com/containers/libpod/pkg/bindings/volumes"
@@ -64,12 +65,12 @@ var _ = Describe("Podman system", func() {
Expect(err).To(BeNil())
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
- systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse)
+ systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PFalse)
Expect(err).To(BeNil())
Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1))
Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
@@ -89,21 +90,21 @@ var _ = Describe("Podman system", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Start container and leave in running
var name2 = "top2"
- _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Adding an unused volume
_, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{})
Expect(err).To(BeNil())
- systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse)
+ systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PFalse)
Expect(err).To(BeNil())
Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1))
Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
@@ -123,21 +124,21 @@ var _ = Describe("Podman system", func() {
// Start and stop a container to enter in exited state.
var name = "top"
- _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name, bindings.PFalse, nil)
Expect(err).To(BeNil())
err = containers.Stop(bt.conn, name, nil)
Expect(err).To(BeNil())
// Start second container and leave in running
var name2 = "top2"
- _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil)
+ _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil)
Expect(err).To(BeNil())
// Adding an unused volume should work
_, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{})
Expect(err).To(BeNil())
- systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PTrue)
+ systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PTrue)
Expect(err).To(BeNil())
Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(0))
Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
@@ -149,4 +150,45 @@ var _ = Describe("Podman system", func() {
// Volume should be pruned now as flag set true
Expect(len(systemPruneResponse.VolumePruneReport)).To(Equal(1))
})
+
+ It("podman system reset", func() {
+ // Adding an unused volume should work
+ _, err := volumes.Create(bt.conn, entities.VolumeCreateOptions{})
+ Expect(err).To(BeNil())
+
+ vols, err := volumes.List(bt.conn, nil)
+ Expect(err).To(BeNil())
+ Expect(len(vols)).To(Equal(1))
+
+ // Start a pod and leave it running
+ _, err = pods.Start(bt.conn, newpod)
+ Expect(err).To(BeNil())
+
+ imageSummary, err := images.List(bt.conn, nil, nil)
+ Expect(err).To(BeNil())
+ // Images are created in the suite's setup ("begin") context, hence the expected count
+ Expect(len(imageSummary)).To(Equal(3))
+
+ err = system.Reset(bt.conn)
+ Expect(err).To(BeNil())
+
+ // re-establish connection
+ s = bt.startAPIService()
+ time.Sleep(1 * time.Second)
+
+ // No pods
+ podSummary, err := pods.List(bt.conn, nil)
+ Expect(err).To(BeNil())
+ Expect(len(podSummary)).To(Equal(0))
+
+ // No images
+ imageSummary, err = images.List(bt.conn, bindings.PTrue, nil)
+ Expect(err).To(BeNil())
+ Expect(len(imageSummary)).To(Equal(0))
+
+ // no volumes
+ vols, err = volumes.List(bt.conn, nil)
+ Expect(err).To(BeNil())
+ Expect(len(vols)).To(BeZero())
+ })
})
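Note: the new "podman system reset" test wipes all state and restarts the API service before listing resources again. A hedged sketch of the same flow outside Ginkgo, reusing only the binding signatures exercised above (the helper itself is hypothetical, and the caller is assumed to restart the service after Reset, as the test does):

    // resetAndVerify assumes conn was obtained via bindings.NewConnection.
    func resetAndVerify(conn context.Context) error {
        if err := system.Reset(conn); err != nil {
            return err
        }
        imgs, err := images.List(conn, nil, nil)
        if err != nil {
            return err
        }
        if len(imgs) != 0 {
            return fmt.Errorf("expected no images after reset, got %d", len(imgs))
        }
        return nil
    }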
diff --git a/pkg/bindings/test/test_suite_test.go b/pkg/bindings/test/test_suite_test.go
index dc2b49b88..d2c2c7838 100644
--- a/pkg/bindings/test/test_suite_test.go
+++ b/pkg/bindings/test/test_suite_test.go
@@ -5,9 +5,14 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/sirupsen/logrus"
)
func TestTest(t *testing.T) {
+ if testing.Verbose() {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+
RegisterFailHandler(Fail)
RunSpecs(t, "Test Suite")
}
diff --git a/pkg/bindings/test/volumes_test.go b/pkg/bindings/test/volumes_test.go
index 59fe48f22..839a4c575 100644
--- a/pkg/bindings/test/volumes_test.go
+++ b/pkg/bindings/test/volumes_test.go
@@ -105,7 +105,7 @@ var _ = Describe("Podman volumes", func() {
zero := uint(0)
err = containers.Stop(connText, "vtest", &zero)
Expect(err).To(BeNil())
- err = volumes.Remove(connText, vol.Name, &bindings.PTrue)
+ err = volumes.Remove(connText, vol.Name, bindings.PTrue)
Expect(err).To(BeNil())
})
diff --git a/pkg/bindings/version.go b/pkg/bindings/version.go
deleted file mode 100644
index c833a644c..000000000
--- a/pkg/bindings/version.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package bindings
-
-func (c Connection) Version() {}
diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go
index d51905f4b..3b56f944f 100644
--- a/pkg/cgroups/cgroups.go
+++ b/pkg/cgroups/cgroups.go
@@ -517,6 +517,10 @@ func (c *CgroupControl) AddPid(pid int) error {
}
for _, n := range names {
+ // If we aren't using cgroup2, we won't write correctly to unified hierarchy
+ if !c.cgroup2 && n == "unified" {
+ continue
+ }
p := filepath.Join(c.getCgroupv1Path(n), "tasks")
if err := ioutil.WriteFile(p, pidString, 0644); err != nil {
return errors.Wrapf(err, "write %s", p)
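Note: the guard added to AddPid skips the "unified" controller on cgroup v1 hosts; when /sys/fs/cgroup/unified is mounted there it is a cgroup v2 hierarchy without a v1-style tasks file, so the write would fail. A hedged, self-contained sketch of the behavior being avoided (paths and helper name are illustrative only):

    // addPidV1 mirrors the new check: never treat "unified" as a v1 controller.
    func addPidV1(base, controller string, pid int) error {
        if controller == "unified" {
            return nil // handled by the cgroup v2 code path instead
        }
        tasks := filepath.Join(base, controller, "tasks")
        return ioutil.WriteFile(tasks, []byte(strconv.Itoa(pid)), 0644)
    }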
diff --git a/pkg/domain/entities/auto-update.go b/pkg/domain/entities/auto-update.go
index aef8fc46b..c51158816 100644
--- a/pkg/domain/entities/auto-update.go
+++ b/pkg/domain/entities/auto-update.go
@@ -1,5 +1,11 @@
package entities
+// AutoUpdateOptions are the options for running auto-update.
+type AutoUpdateOptions struct {
+ // Authfile to use when contacting registries.
+ Authfile string
+}
+
// AutoUpdateReport contains the results from running auto-update.
type AutoUpdateReport struct {
// Units - the restarted systemd units during auto-update.
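Note: AutoUpdateOptions above currently carries only the authfile. A hedged sketch of how a caller might thread it through the new ContainerEngine.AutoUpdate signature; the helper, engine value, and authfile path are assumptions, not part of this patch:

    // runAutoUpdate is a hypothetical caller; engine is any entities.ContainerEngine.
    func runAutoUpdate(ctx context.Context, engine entities.ContainerEngine, authfile string) {
        report, failures := engine.AutoUpdate(ctx, entities.AutoUpdateOptions{Authfile: authfile})
        for _, err := range failures {
            logrus.Error(err)
        }
        if report != nil {
            fmt.Println(strings.Join(report.Units, "\n"))
        }
    }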
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index e5330e1ab..8d85a9b23 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -170,7 +170,7 @@ type CheckpointOptions struct {
IgnoreRootFS bool
Keep bool
Latest bool
- LeaveRuninng bool
+ LeaveRunning bool
TCPEstablished bool
}
@@ -242,7 +242,6 @@ type ExecOptions struct {
Latest bool
PreserveFDs uint
Privileged bool
- Streams define.AttachStreams
Tty bool
User string
WorkDir string
@@ -311,6 +310,7 @@ type ContainerRunReport struct {
// cleanup command
type ContainerCleanupOptions struct {
All bool
+ Exec string
Latest bool
Remove bool
RemoveImage bool
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 719ac3f9e..3d5161745 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -10,7 +10,7 @@ import (
)
type ContainerEngine interface {
- AutoUpdate(ctx context.Context) (*AutoUpdateReport, []error)
+ AutoUpdate(ctx context.Context, options AutoUpdateOptions) (*AutoUpdateReport, []error)
Config(ctx context.Context) (*config.Config, error)
ContainerAttach(ctx context.Context, nameOrId string, options AttachOptions) error
ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)
@@ -19,7 +19,8 @@ type ContainerEngine interface {
ContainerCp(ctx context.Context, source, dest string, options ContainerCpOptions) (*ContainerCpReport, error)
ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*ContainerCreateReport, error)
ContainerDiff(ctx context.Context, nameOrId string, options DiffOptions) (*DiffReport, error)
- ContainerExec(ctx context.Context, nameOrId string, options ExecOptions) (int, error)
+ ContainerExec(ctx context.Context, nameOrId string, options ExecOptions, streams define.AttachStreams) (int, error)
+ ContainerExecDetached(ctx context.Context, nameOrID string, options ExecOptions) (string, error)
ContainerExists(ctx context.Context, nameOrId string) (*BoolReport, error)
ContainerExport(ctx context.Context, nameOrId string, options ContainerExportOptions) error
ContainerInit(ctx context.Context, namesOrIds []string, options ContainerInitOptions) ([]*ContainerInitReport, error)
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
index ffa71abd6..7d7099838 100644
--- a/pkg/domain/entities/engine_image.go
+++ b/pkg/domain/entities/engine_image.go
@@ -34,4 +34,5 @@ type ImageEngine interface {
ManifestAnnotate(ctx context.Context, names []string, opts ManifestAnnotateOptions) (string, error)
ManifestRemove(ctx context.Context, names []string) (string, error)
ManifestPush(ctx context.Context, names []string, manifestPushOpts ManifestPushOptions) error
+ Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error)
}
diff --git a/pkg/domain/entities/engine_system.go b/pkg/domain/entities/engine_system.go
index e2000f5cb..a0ecfe9ea 100644
--- a/pkg/domain/entities/engine_system.go
+++ b/pkg/domain/entities/engine_system.go
@@ -9,6 +9,6 @@ import (
type SystemEngine interface {
Renumber(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig) error
Migrate(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig, options SystemMigrateOptions) error
- Reset(ctx context.Context, options SystemResetOptions) error
+ Reset(ctx context.Context) error
Shutdown(ctx context.Context)
}
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index e116a90b9..0f909ab37 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -1,7 +1,6 @@
package entities
import (
- "net/url"
"time"
"github.com/containers/image/v5/manifest"
@@ -221,15 +220,13 @@ type ImageSearchReport struct {
// Image List Options
type ImageListOptions struct {
- All bool `json:"all" schema:"all"`
- Filter []string `json:"Filter,omitempty"`
- Filters url.Values `json:"filters" schema:"filters"`
+ All bool `json:"all" schema:"all"`
+ Filter []string `json:"Filter,omitempty"`
}
type ImagePruneOptions struct {
- All bool `json:"all" schema:"all"`
- Filter []string `json:"filter" schema:"filter"`
- Filters url.Values `json:"filters" schema:"filters"`
+ All bool `json:"all" schema:"all"`
+ Filter []string `json:"filter" schema:"filter"`
}
type ImagePruneReport struct {
@@ -309,3 +306,13 @@ type SetTrustOptions struct {
PubKeysFile []string
Type string
}
+
+// SignOptions describes input options for the CLI signing
+type SignOptions struct {
+ Directory string
+ SignBy string
+ CertDir string
+}
+
+// SignReport describes the result of signing
+type SignReport struct{}
diff --git a/pkg/domain/entities/network.go b/pkg/domain/entities/network.go
index cffd40899..9beeeb042 100644
--- a/pkg/domain/entities/network.go
+++ b/pkg/domain/entities/network.go
@@ -10,6 +10,7 @@ import (
type NetworkListOptions struct {
Format string
Quiet bool
+ Filter string
}
// NetworkListReport describes the results from listing networks
@@ -19,6 +20,7 @@ type NetworkListReport struct {
// NetworkInspectOptions describes options for inspect networks
type NetworkInspectOptions struct {
+ Format string
}
// NetworkInspectReport describes the results from inspect networks
@@ -36,6 +38,7 @@ type NetworkRmReport struct {
}
// NetworkCreateOptions describes options to create a network
+// swagger:model NetworkCreateOptions
type NetworkCreateOptions struct {
DisableDNS bool
Driver string
diff --git a/pkg/domain/infra/abi/auto-update.go b/pkg/domain/infra/abi/auto-update.go
index aa20664b4..9fcc451fd 100644
--- a/pkg/domain/infra/abi/auto-update.go
+++ b/pkg/domain/infra/abi/auto-update.go
@@ -7,7 +7,11 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func (ic *ContainerEngine) AutoUpdate(ctx context.Context) (*entities.AutoUpdateReport, []error) {
- units, failures := autoupdate.AutoUpdate(ic.Libpod)
+func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
+ // Convert the entities options to the autoupdate ones. We can't use
+ // them in the entities package as low-level packages must not leak
+ // into the remote client.
+ autoOpts := autoupdate.Options{Authfile: options.Authfile}
+ units, failures := autoupdate.AutoUpdate(ic.Libpod, autoOpts)
return &entities.AutoUpdateReport{Units: units}, failures
}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 249e8147c..b4e38ca23 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -434,6 +434,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
TCPEstablished: options.TCPEstablished,
TargetFile: options.Export,
IgnoreRootfs: options.IgnoreRootFS,
+ KeepRunning: options.LeaveRunning,
}
if options.All {
@@ -535,7 +536,22 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string,
return nil
}
-func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) {
+func makeExecConfig(options entities.ExecOptions) *libpod.ExecConfig {
+ execConfig := new(libpod.ExecConfig)
+ execConfig.Command = options.Cmd
+ execConfig.Terminal = options.Tty
+ execConfig.Privileged = options.Privileged
+ execConfig.Environment = options.Envs
+ execConfig.User = options.User
+ execConfig.WorkDir = options.WorkDir
+ execConfig.DetachKeys = &options.DetachKeys
+ execConfig.PreserveFDs = options.PreserveFDs
+ execConfig.AttachStdin = options.Interactive
+
+ return execConfig
+}
+
+func checkExecPreserveFDs(options entities.ExecOptions) (int, error) {
ec := define.ExecErrorCodeGeneric
if options.PreserveFDs > 0 {
entries, err := ioutil.ReadDir("/proc/self/fd")
@@ -558,15 +574,66 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, o
}
}
}
+ return ec, nil
+}
+
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
+ ec, err := checkExecPreserveFDs(options)
+ if err != nil {
+ return ec, err
+ }
ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
if err != nil {
return ec, err
}
ctr := ctrs[0]
- ec, err = terminal.ExecAttachCtr(ctx, ctr, options.Tty, options.Privileged, options.Envs, options.Cmd, options.User, options.WorkDir, &options.Streams, options.PreserveFDs, options.DetachKeys)
+
+ execConfig := makeExecConfig(options)
+
+ ec, err = terminal.ExecAttachCtr(ctx, ctr, execConfig, &streams)
return define.TranslateExecErrorToExitCode(ec, err), err
}
+func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId string, options entities.ExecOptions) (string, error) {
+ _, err := checkExecPreserveFDs(options)
+ if err != nil {
+ return "", err
+ }
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+ if err != nil {
+ return "", err
+ }
+ ctr := ctrs[0]
+
+ execConfig := makeExecConfig(options)
+
+ // Make an exit command
+ storageConfig := ic.Libpod.StorageConfig()
+ runtimeConfig, err := ic.Libpod.GetConfig()
+ if err != nil {
+ return "", errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
+ }
+ podmanPath, err := os.Executable()
+ if err != nil {
+ return "", errors.Wrapf(err, "error retrieving executable to build exec exit command")
+ }
+ // TODO: Add some ability to toggle syslog
+ exitCommandArgs := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, podmanPath, false, true, true)
+ execConfig.ExitCommand = exitCommandArgs
+
+ // Create and start the exec session
+ id, err := ctr.ExecCreate(execConfig)
+ if err != nil {
+ return "", err
+ }
+
+ // TODO: we should try and retrieve exit code if this fails.
+ if err := ctr.ExecStart(id); err != nil {
+ return "", err
+ }
+ return id, nil
+}
+
func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) {
var reports []*entities.ContainerStartReport
var exitCode = define.ExecErrorCodeGeneric
@@ -835,6 +902,20 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
for _, ctr := range ctrs {
var err error
report := entities.ContainerCleanupReport{Id: ctr.ID()}
+
+ if options.Exec != "" {
+ if options.Remove {
+ if err := ctr.ExecRemove(options.Exec, false); err != nil {
+ return nil, err
+ }
+ } else {
+ if err := ctr.ExecCleanup(options.Exec); err != nil {
+ return nil, err
+ }
+ }
+ return []*entities.ContainerCleanupReport{}, nil
+ }
+
if options.Remove {
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true)
if err != nil {
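Note: the detached-exec path above creates the session with an exit command and starts it without attaching; the returned session ID is what `podman container cleanup --exec <id>` (the new ContainerCleanupOptions.Exec field) later cleans up or removes. A hedged caller-side sketch; the helper, engine value, and container name are assumptions:

    // execDetached sketches a caller; engine is any entities.ContainerEngine.
    func execDetached(ctx context.Context, engine entities.ContainerEngine, ctrName string) (string, error) {
        opts := entities.ExecOptions{
            Cmd:  []string{"touch", "/tmp/hello"},
            Envs: map[string]string{"TERM": "xterm"},
        }
        // Returns the exec session ID for later cleanup.
        return engine.ContainerExecDetached(ctx, ctrName, opts)
    }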
diff --git a/pkg/domain/infra/abi/healthcheck.go b/pkg/domain/infra/abi/healthcheck.go
index 351bf4f7e..4e925ef56 100644
--- a/pkg/domain/infra/abi/healthcheck.go
+++ b/pkg/domain/infra/abi/healthcheck.go
@@ -3,7 +3,6 @@ package abi
import (
"context"
- "github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/domain/entities"
)
@@ -13,9 +12,9 @@ func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrId string,
if err != nil {
return nil, err
}
- hcStatus := "unhealthy"
- if status == libpod.HealthCheckSuccess {
- hcStatus = "healthy"
+ hcStatus := define.HealthCheckUnhealthy
+ if status == define.HealthCheckSuccess {
+ hcStatus = define.HealthCheckHealthy
}
report := define.HealthCheckResults{
Status: hcStatus,
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 7ab5131f0..6e774df8e 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -4,14 +4,22 @@ import (
"context"
"fmt"
"io"
+ "io/ioutil"
+ "net/url"
"os"
+ "path/filepath"
+ "strconv"
"strings"
+ "github.com/containers/libpod/pkg/rootless"
+
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod/define"
@@ -19,6 +27,7 @@ import (
libpodImage "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/domain/entities"
domainUtils "github.com/containers/libpod/pkg/domain/utils"
+ "github.com/containers/libpod/pkg/trust"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -26,6 +35,9 @@ import (
"github.com/sirupsen/logrus"
)
+// SignatureStoreDir defines default directory to store signatures
+const SignatureStoreDir = "/var/lib/containers/sigstore"
+
func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
_, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
if err != nil && errors.Cause(err) != define.ErrNoSuchImage {
@@ -549,3 +561,145 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
_ = ir.Libpod.Shutdown(false)
})
}
+
+func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerCertPath: options.CertDir,
+ }
+
+ mech, err := signature.NewGPGSigningMechanism()
+ if err != nil {
+ return nil, errors.Wrap(err, "error initializing GPG")
+ }
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ return nil, errors.Wrap(err, "signing is not supported")
+ }
+ sc := ir.Libpod.SystemContext()
+ sc.DockerCertPath = options.CertDir
+
+ systemRegistriesDirPath := trust.RegistriesDirPath(sc)
+ registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading registry configuration")
+ }
+
+ for _, signimage := range names {
+ var sigStoreDir string
+ srcRef, err := alltransports.ParseImageName(signimage)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing image name")
+ }
+ rawSource, err := srcRef.NewImageSource(ctx, sc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting image source")
+ }
+ err = rawSource.Close()
+ if err != nil {
+ logrus.Errorf("unable to close new image source %q", err)
+ }
+ getManifest, _, err := rawSource.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting getManifest")
+ }
+ dockerReference := rawSource.Reference().DockerReference()
+ if dockerReference == nil {
+ return nil, errors.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
+ }
+
+ // create the signstore file
+ rtc, err := ir.Libpod.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ newImage, err := ir.Libpod.ImageRuntime().New(ctx, signimage, rtc.Engine.SignaturePolicyPath, "", os.Stderr, &dockerRegistryOptions, image.SigningOptions{SignBy: options.SignBy}, nil, util.PullImageMissing)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error pulling image %s", signimage)
+ }
+ if sigStoreDir == "" {
+ if rootless.IsRootless() {
+ sigStoreDir = filepath.Join(filepath.Dir(ir.Libpod.StorageConfig().GraphRoot), "sigstore")
+ } else {
+ registryInfo := trust.HaveMatchRegistry(rawSource.Reference().DockerReference().String(), registryConfigs)
+ if registryInfo != nil {
+ if sigStoreDir = registryInfo.SigStoreStaging; sigStoreDir == "" {
+ sigStoreDir = registryInfo.SigStore
+
+ }
+ }
+ }
+ }
+ sigStoreDir, err = isValidSigStoreDir(sigStoreDir)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid signature storage %s", sigStoreDir)
+ }
+ repos, err := newImage.RepoDigests()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error calculating repo digests for %s", signimage)
+ }
+ if len(repos) == 0 {
+ logrus.Errorf("no repodigests associated with the image %s", signimage)
+ continue
+ }
+
+ // create signature
+ newSig, err := signature.SignDockerManifest(getManifest, dockerReference.String(), mech, options.SignBy)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new signature")
+ }
+
+ trimmedDigest := strings.TrimPrefix(repos[0], strings.Split(repos[0], "/")[0])
+ sigStoreDir = filepath.Join(sigStoreDir, strings.Replace(trimmedDigest, ":", "=", 1))
+ if err := os.MkdirAll(sigStoreDir, 0751); err != nil {
+ // The directory is allowed to exist
+ if !os.IsExist(err) {
+ logrus.Errorf("error creating directory %s: %s", sigStoreDir, err)
+ continue
+ }
+ }
+ sigFilename, err := getSigFilename(sigStoreDir)
+ if err != nil {
+ logrus.Errorf("error creating sigstore file: %v", err)
+ continue
+ }
+ err = ioutil.WriteFile(filepath.Join(sigStoreDir, sigFilename), newSig, 0644)
+ if err != nil {
+ logrus.Errorf("error storing signature for %s", rawSource.Reference().DockerReference().String())
+ continue
+ }
+ }
+ return nil, nil
+}
+
+func getSigFilename(sigStoreDirPath string) (string, error) {
+ sigFileSuffix := 1
+ sigFiles, err := ioutil.ReadDir(sigStoreDirPath)
+ if err != nil {
+ return "", err
+ }
+ sigFilenames := make(map[string]bool)
+ for _, file := range sigFiles {
+ sigFilenames[file.Name()] = true
+ }
+ for {
+ sigFilename := "signature-" + strconv.Itoa(sigFileSuffix)
+ if _, exists := sigFilenames[sigFilename]; !exists {
+ return sigFilename, nil
+ }
+ sigFileSuffix++
+ }
+}
+
+func isValidSigStoreDir(sigStoreDir string) (string, error) {
+ writeURIs := map[string]bool{"file": true}
+ url, err := url.Parse(sigStoreDir)
+ if err != nil {
+ return sigStoreDir, errors.Wrapf(err, "invalid directory %s", sigStoreDir)
+ }
+ _, exists := writeURIs[url.Scheme]
+ if !exists {
+ return sigStoreDir, errors.Errorf("writing to %s is not supported. Use a supported scheme", sigStoreDir)
+ }
+ sigStoreDir = url.Path
+ return sigStoreDir, nil
+}
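Note: the signing loop above derives the per-image signature directory from the first repo digest: the registry host is trimmed, ":" becomes "=", and files are numbered signature-1, signature-2, ... by getSigFilename. A self-contained sketch of the resulting layout (the digest value and helper name are made up):

    // sigPathFor shows the layout produced above for a made-up repo digest.
    func sigPathFor(repoDigest, storeDir string) string {
        trimmed := strings.TrimPrefix(repoDigest, strings.Split(repoDigest, "/")[0])
        dir := filepath.Join(storeDir, strings.Replace(trimmed, ":", "=", 1))
        return filepath.Join(dir, "signature-1")
    }
    // sigPathFor("quay.io/libpod/alpine@sha256:abc123", "/var/lib/containers/sigstore")
    //   => /var/lib/containers/sigstore/libpod/alpine@sha256=abc123/signature-1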
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
index c559e250c..3034e36ec 100644
--- a/pkg/domain/infra/abi/images_list.go
+++ b/pkg/domain/infra/abi/images_list.go
@@ -13,14 +13,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
err error
)
- // TODO: Future work support for domain.Filters
- // filters := utils.ToLibpodFilters(opts.Filters)
-
- if len(opts.Filter) > 0 {
- images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
- } else {
- images, err = ir.Libpod.ImageRuntime().GetImages()
- }
+ images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
if err != nil {
return nil, err
}
@@ -40,9 +33,18 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
}
}
} else {
- repoTags, _ = img.RepoTags()
- if len(repoTags) == 0 {
- continue
+ repoTags, err = img.RepoTags()
+ if err != nil {
+ return nil, err
+ }
+ if len(img.Names()) == 0 {
+ parent, err := img.IsParent(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if parent {
+ continue
+ }
}
}
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 5c39b5374..8e3515824 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -6,7 +6,9 @@ import (
"fmt"
"io/ioutil"
"path/filepath"
+ "strings"
+ "github.com/containernetworking/cni/libcni"
cniversion "github.com/containernetworking/cni/pkg/version"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/domain/entities"
@@ -15,32 +17,32 @@ import (
"github.com/pkg/errors"
)
-func getCNIConfDir(r *libpod.Runtime) (string, error) {
- config, err := r.GetConfig()
- if err != nil {
- return "", err
- }
- configPath := config.Network.NetworkConfigDir
-
- if len(config.Network.NetworkConfigDir) < 1 {
- configPath = network.CNIConfigDir
- }
- return configPath, nil
-}
-
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
var reports []*entities.NetworkListReport
- cniConfigPath, err := getCNIConfDir(ic.Libpod)
+
+ config, err := ic.Libpod.GetConfig()
if err != nil {
return nil, err
}
- networks, err := network.LoadCNIConfsFromDir(cniConfigPath)
+
+ networks, err := network.LoadCNIConfsFromDir(network.GetCNIConfDir(config))
if err != nil {
return nil, err
}
+ var tokens []string
+ // tokenize the networkListOptions.Filter into key=value.
+ if len(options.Filter) > 0 {
+ tokens = strings.Split(options.Filter, "=")
+ if len(tokens) != 2 {
+ return nil, fmt.Errorf("invalid filter syntax : %s", options.Filter)
+ }
+ }
+
for _, n := range networks {
- reports = append(reports, &entities.NetworkListReport{NetworkConfigList: n})
+ if ifPassesFilterTest(n, tokens) {
+ reports = append(reports, &entities.NetworkListReport{NetworkConfigList: n})
+ }
}
return reports, nil
}
@@ -49,8 +51,14 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
var (
rawCNINetworks []entities.NetworkInspectReport
)
+
+ config, err := ic.Libpod.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
for _, name := range namesOrIds {
- rawList, err := network.InspectNetwork(name)
+ rawList, err := network.InspectNetwork(config, name)
if err != nil {
return nil, err
}
@@ -61,6 +69,12 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
var reports []*entities.NetworkRmReport
+
+ config, err := ic.Libpod.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
for _, name := range namesOrIds {
report := entities.NetworkRmReport{Name: name}
containers, err := ic.Libpod.GetAllContainers()
@@ -80,7 +94,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
}
}
}
- if err := network.RemoveNetwork(name); err != nil {
+ if err := network.RemoveNetwork(config, name); err != nil {
report.Err = err
}
reports = append(reports, &report)
@@ -117,10 +131,10 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
// if range is provided, make sure it is "in" network
if subnet.IP != nil {
// if network is provided, does it conflict with existing CNI or live networks
- err = network.ValidateUserNetworkIsAvailable(subnet)
+ err = network.ValidateUserNetworkIsAvailable(runtimeConfig, subnet)
} else {
// if no network is provided, figure out network
- subnet, err = network.GetFreeNetwork()
+ subnet, err = network.GetFreeNetwork(runtimeConfig)
}
if err != nil {
return "", err
@@ -158,13 +172,13 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
return "", errors.Errorf("the ip range %s does not fall within the subnet range %s", options.Range.String(), subnet.String())
}
}
- bridgeDeviceName, err := network.GetFreeDeviceName()
+ bridgeDeviceName, err := network.GetFreeDeviceName(runtimeConfig)
if err != nil {
return "", err
}
if len(name) > 0 {
- netNames, err := network.GetNetworkNamesFromFileSystem()
+ netNames, err := network.GetNetworkNamesFromFileSystem(runtimeConfig)
if err != nil {
return "", err
}
@@ -205,11 +219,7 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
if err != nil {
return "", err
}
- cniConfigPath, err := getCNIConfDir(r)
- if err != nil {
- return "", err
- }
- cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name))
+ cniPathName := filepath.Join(network.GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name))
err = ioutil.WriteFile(cniPathName, b, 0644)
return cniPathName, err
}
@@ -222,12 +232,18 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
if err != nil {
return "", err
}
+
+ config, err := r.GetConfig()
+ if err != nil {
+ return "", err
+ }
+
// Make sure the host-device exists
if !util.StringInSlice(options.MacVLAN, liveNetNames) {
return "", errors.Errorf("failed to find network interface %q", options.MacVLAN)
}
if len(name) > 0 {
- netNames, err := network.GetNetworkNamesFromFileSystem()
+ netNames, err := network.GetNetworkNamesFromFileSystem(config)
if err != nil {
return "", err
}
@@ -235,7 +251,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
return "", errors.Errorf("the network name %s is already used", name)
}
} else {
- name, err = network.GetFreeDeviceName()
+ name, err = network.GetFreeDeviceName(config)
if err != nil {
return "", err
}
@@ -248,11 +264,29 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
if err != nil {
return "", err
}
- cniConfigPath, err := getCNIConfDir(r)
- if err != nil {
- return "", err
- }
- cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name))
+ cniPathName := filepath.Join(network.GetCNIConfDir(config), fmt.Sprintf("%s.conflist", name))
err = ioutil.WriteFile(cniPathName, b, 0644)
return cniPathName, err
}
+
+func ifPassesFilterTest(netconf *libcni.NetworkConfigList, filter []string) bool {
+ result := false
+ if len(filter) == 0 {
+ // No filter, so pass
+ return true
+ }
+ switch strings.ToLower(filter[0]) {
+ case "name":
+ if filter[1] == netconf.Name {
+ result = true
+ }
+ case "plugin":
+ plugins := network.GetCNIPlugins(netconf)
+ if strings.Contains(plugins, filter[1]) {
+ result = true
+ }
+ default:
+ result = false
+ }
+ return result
+}
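Note: NetworkList now accepts a single key=value filter, which ifPassesFilterTest matches against the network name or its plugin list. A minimal sketch of the tokenization (the helper name and filter string are examples, e.g. from `podman network ls --filter plugin=bridge`):

    // parseNetworkFilter mirrors the key=value tokenization above.
    func parseNetworkFilter(filter string) ([]string, error) {
        tokens := strings.Split(filter, "=")
        if len(tokens) != 2 {
            return nil, fmt.Errorf("invalid filter syntax: %s", filter)
        }
        return tokens, nil
    }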
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index d701d65de..af2ec5f7b 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -375,7 +375,7 @@ func sizeOfPath(path string) (int64, error) {
return size, err
}
-func (se *SystemEngine) Reset(ctx context.Context, options entities.SystemResetOptions) error {
+func (se *SystemEngine) Reset(ctx context.Context) error {
return se.Libpod.Reset(ctx)
}
diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go
index 15701342f..8d9cdde03 100644
--- a/pkg/domain/infra/abi/terminal/terminal_linux.go
+++ b/pkg/domain/infra/abi/terminal/terminal_linux.go
@@ -15,13 +15,13 @@ import (
)
// ExecAttachCtr execs and attaches to a container
-func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpod.ExecConfig, streams *define.AttachStreams) (int, error) {
resize := make(chan remotecommand.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
// Check if we are attached to a terminal. If we are, generate resize
// events, and set the terminal to raw mode
- if haveTerminal && tty {
+ if haveTerminal && execConfig.Terminal {
cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
if err != nil {
return -1, err
@@ -34,16 +34,6 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b
}()
}
- execConfig := new(libpod.ExecConfig)
- execConfig.Command = cmd
- execConfig.Terminal = tty
- execConfig.Privileged = privileged
- execConfig.Environment = env
- execConfig.User = user
- execConfig.WorkDir = workDir
- execConfig.DetachKeys = &detachKeys
- execConfig.PreserveFDs = preserveFDs
-
return ctr.Exec(execConfig, streams, resize)
}
diff --git a/pkg/domain/infra/tunnel/auto-update.go b/pkg/domain/infra/tunnel/auto-update.go
index fac033050..5c2dd360d 100644
--- a/pkg/domain/infra/tunnel/auto-update.go
+++ b/pkg/domain/infra/tunnel/auto-update.go
@@ -7,6 +7,6 @@ import (
"github.com/pkg/errors"
)
-func (ic *ContainerEngine) AutoUpdate(ctx context.Context) (*entities.AutoUpdateReport, []error) {
+func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
return nil, []error{errors.New("not implemented")}
}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 49a3069d6..30c4a8359 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -4,14 +4,17 @@ import (
"context"
"io"
"os"
+ "strings"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/bindings/containers"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/specgen"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string, image string, args []string, options entities.ContainerRunlabelOptions) error {
@@ -84,10 +87,25 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
for _, c := range ctrs {
report := entities.StopReport{Id: c.ID}
- report.Err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout)
- // TODO we need to associate errors returned by http with common
- // define.errors so that we can equity tests. this will allow output
- // to be the same as the native client
+ if err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout); err != nil {
+ // These first two are considered non-fatal under the right conditions
+ if errors.Cause(err).Error() == define.ErrCtrStopped.Error() {
+ logrus.Debugf("Container %s is already stopped", c.ID)
+ reports = append(reports, &report)
+ continue
+ } else if options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
+ logrus.Debugf("Container %s is not running, could not stop", c.ID)
+ reports = append(reports, &report)
+ continue
+ }
+
+ // TODO we need to associate errors returned by http with common
+ // define.errors so that we can write equality tests. this will allow output
+ // to be the same as the native client
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
reports = append(reports, &report)
}
return reports, nil
@@ -267,7 +285,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
}
}
for _, c := range ctrs {
- report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRuninng, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export)
+ report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRunning, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export)
if err != nil {
reports = append(reports, &entities.CheckpointReport{Id: c.ID, Err: err})
}
@@ -324,15 +342,59 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []strin
}
func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error {
- return errors.New("not implemented")
+ return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil)
}
-func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) {
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
return 125, errors.New("not implemented")
}
+func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) {
+ return "", errors.New("not implemented")
+}
+
+func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint
+ attachErr := make(chan error)
+ attachReady := make(chan bool)
+ go func() {
+ err := containers.Attach(ic.ClientCxt, name, detachKeys, bindings.PFalse, bindings.PTrue, input, output, errput, attachReady)
+ attachErr <- err
+ }()
+ // Wait for the attach to actually happen before starting
+ // the container.
+ <-attachReady
+ if err := containers.Start(ic.ClientCxt, name, detachKeys); err != nil {
+ return err
+ }
+ return <-attachErr
+}
+
func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) {
- return nil, errors.New("not implemented")
+ var reports []*entities.ContainerStartReport
+ for _, name := range namesOrIds {
+ report := entities.ContainerStartReport{
+ Id: name,
+ RawInput: name,
+ ExitCode: 125,
+ }
+ if options.Attach {
+ report.Err = startAndAttach(ic, name, &options.DetachKeys, options.Stdin, options.Stdout, options.Stderr)
+ if report.Err == nil {
+ exitCode, err := containers.Wait(ic.ClientCxt, name, nil)
+ if err == nil {
+ report.ExitCode = int(exitCode)
+ }
+ } else {
+ report.ExitCode = define.ExitCode(report.Err)
+ }
+ reports = append(reports, &report)
+ return reports, nil
+ }
+ report.Err = containers.Start(ic.ClientCxt, name, &options.DetachKeys)
+ report.ExitCode = define.ExitCode(report.Err)
+ reports = append(reports, &report)
+ }
+ return reports, nil
}
func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.ContainerListOptions) ([]entities.ListContainer, error) {
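
startAndAttach above orders the two bindings calls with channels: the attach goroutine signals attachReady once the attach request is registered, only then is Start issued, and the function finally returns whatever the attach goroutine reports. A runnable sketch of the same ordering, with trivial stand-ins for containers.Attach and containers.Start:

package main

import (
	"fmt"
	"time"
)

// fakeAttach stands in for containers.Attach: it signals readiness,
// then blocks until the "container" exits.
func fakeAttach(ready chan<- bool, done <-chan error) error {
	ready <- true
	return <-done
}

// startAndAttach mirrors the ordering above: attach first, wait for
// readiness, then start, and finally return the attach result.
func startAndAttach(start func() error, done <-chan error) error {
	attachErr := make(chan error)
	attachReady := make(chan bool)
	go func() {
		attachErr <- fakeAttach(attachReady, done)
	}()
	<-attachReady // do not start until the attach is registered
	if err := start(); err != nil {
		return err
	}
	return <-attachErr
}

func main() {
	done := make(chan error)
	go func() {
		time.Sleep(10 * time.Millisecond)
		done <- nil // container exited cleanly
	}()
	err := startAndAttach(func() error { return nil }, done)
	fmt.Println("attach finished with:", err)
}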
@@ -340,7 +402,30 @@ func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.C
}
func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) {
- return nil, errors.New("not implemented")
+ if opts.Rm {
+ logrus.Info("the remote client does not support --rm yet")
+ }
+ con, err := containers.CreateWithSpec(ic.ClientCxt, opts.Spec)
+ if err != nil {
+ return nil, err
+ }
+ report := entities.ContainerRunReport{Id: con.ID}
+ // Attach
+ if !opts.Detach {
+ err = startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream)
+ if err == nil {
+ exitCode, err := containers.Wait(ic.ClientCxt, con.ID, nil)
+ if err == nil {
+ report.ExitCode = int(exitCode)
+ }
+ }
+ } else {
+ err = containers.Start(ic.ClientCxt, con.ID, nil)
+ }
+ if err != nil {
+ report.ExitCode = define.ExitCode(err)
+ }
+ return &report, err
}
func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) {
@@ -360,6 +445,11 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
}
for _, ctr := range ctrs {
err := containers.ContainerInit(ic.ClientCxt, ctr.ID)
+ // When using all, it is NOT considered an error if a container
+ // has already been init'd.
+ if err != nil && options.All && strings.Contains(errors.Cause(err).Error(), define.ErrCtrStateInvalid.Error()) {
+ err = nil
+ }
reports = append(reports, &entities.ContainerInitReport{
Err: err,
Id: ctr.ID,
@@ -381,7 +471,29 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) {
}
func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrId string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
- return nil, errors.New("not implemented")
+ var (
+ reports []*entities.ContainerPortReport
+ namesOrIds []string
+ )
+ if len(nameOrId) > 0 {
+ namesOrIds = append(namesOrIds, nameOrId)
+ }
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ if con.State != define.ContainerStateRunning.String() {
+ continue
+ }
+ if len(con.Ports) > 0 {
+ reports = append(reports, &entities.ContainerPortReport{
+ Id: con.ID,
+ Ports: con.Ports,
+ })
+ }
+ }
+ return reports, nil
}
func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string, options entities.ContainerCpOptions) (*entities.ContainerCpReport, error) {
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
index 682d60d6a..862c7a5d6 100644
--- a/pkg/domain/infra/tunnel/helpers.go
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -20,7 +20,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam
if all && len(namesOrIds) > 0 {
return nil, errors.New("cannot lookup containers and all")
}
- c, err := containers.List(contextWithConnection, nil, &bindings.PTrue, nil, nil, nil, &bindings.PTrue)
+ c, err := containers.List(contextWithConnection, nil, bindings.PTrue, nil, nil, nil, bindings.PTrue)
if err != nil {
return nil, err
}
@@ -37,7 +37,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam
}
}
if !found {
- return nil, errors.Errorf("unable to find container %q", id)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "unable to find container %q", id)
}
}
return cons, nil
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 00893194c..3d5626c45 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -4,6 +4,7 @@ import (
"context"
"io/ioutil"
"os"
+ "strings"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
@@ -25,8 +26,13 @@ func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts enti
}
func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
- images, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+ filters := make(map[string][]string, len(opts.Filter))
+ for _, filter := range opts.Filter {
+ f := strings.Split(filter, "=")
+ filters[f[0]] = f[1:]
+ }
+ images, err := images.List(ir.ClientCxt, &opts.All, filters)
if err != nil {
return nil, err
}
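
List (and Prune below) now turn the CLI's repeated --filter key=value strings into the map[string][]string the image bindings expect. A standalone version of that loop; as in the code above, strings.Split keeps every '='-separated piece after the key:

package main

import (
	"fmt"
	"strings"
)

// parseFilters mirrors the loop above: each "key=value" string becomes
// a map entry whose value is everything after the first element of the
// split (so "label=a=b" yields ["a" "b"]).
func parseFilters(raw []string) map[string][]string {
	filters := make(map[string][]string, len(raw))
	for _, filter := range raw {
		f := strings.Split(filter, "=")
		filters[f[0]] = f[1:]
	}
	return filters
}

func main() {
	fmt.Println(parseFilters([]string{"dangling=true", "label=maintainer=me"}))
	// map[dangling:[true] label:[maintainer me]]
}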
@@ -61,7 +67,13 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entiti
}
func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
- results, err := images.Prune(ir.ClientCxt, &opts.All, opts.Filters)
+ filters := make(map[string][]string, len(opts.Filter))
+ for _, filter := range opts.Filter {
+ f := strings.Split(filter, "=")
+ filters[f[0]] = f[1:]
+ }
+
+ results, err := images.Prune(ir.ClientCxt, &opts.All, filters)
if err != nil {
return nil, err
}
@@ -112,7 +124,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string,
func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error {
// Remove all tags if none are provided
if len(tags) == 0 {
- newImage, err := images.GetImage(ir.ClientCxt, nameOrId, &bindings.PFalse)
+ newImage, err := images.GetImage(ir.ClientCxt, nameOrId, bindings.PFalse)
if err != nil {
return err
}
@@ -190,7 +202,6 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string,
f *os.File
err error
)
-
switch options.Format {
case "oci-dir", "docker-dir":
f, err = ioutil.TempFile("", "podman_save")
@@ -258,9 +269,13 @@ func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts
}
func (ir *ImageEngine) Tree(ctx context.Context, nameOrId string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
- return nil, errors.New("not implemented yet")
+ return images.Tree(ir.ClientCxt, nameOrId, &opts.WhatRequires)
}
// Shutdown Libpod engine
func (ir *ImageEngine) Shutdown(_ context.Context) {
}
+
+func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
+ return nil, errors.New("not implemented yet")
+}
diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go
index 9c1f5349a..beac378fe 100644
--- a/pkg/domain/infra/tunnel/manifest.go
+++ b/pkg/domain/infra/tunnel/manifest.go
@@ -57,46 +57,21 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd
}
manifestAddOpts.Annotation = annotations
}
- listID, err := manifests.Add(ctx, opts.Images[1], manifestAddOpts)
+ listID, err := manifests.Add(ir.ClientCxt, opts.Images[1], manifestAddOpts)
if err != nil {
return listID, errors.Wrapf(err, "error adding to manifest list %s", opts.Images[1])
}
return listID, nil
}
-// FIXME There is no endpoint for annotate and therefor this code is currently invalid
// ManifestAnnotate updates an entry of the manifest list
func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, names []string, opts entities.ManifestAnnotateOptions) (string, error) {
return "", errors.New("not implemented")
- // manifestAnnotateOpts := image.ManifestAnnotateOpts{
- // Arch: opts.Arch,
- // Features: opts.Features,
- // OS: opts.OS,
- // OSFeatures: opts.OSFeatures,
- // OSVersion: opts.OSVersion,
- // Variant: opts.Variant,
- // }
- // if len(opts.Annotation) > 0 {
- // annotations := make(map[string]string)
- // for _, annotationSpec := range opts.Annotation {
- // spec := strings.SplitN(annotationSpec, "=", 2)
- // if len(spec) != 2 {
- // return "", errors.Errorf("no value given for annotation %q", spec[0])
- // }
- // annotations[spec[0]] = spec[1]
- // }
- // manifestAnnotateOpts.Annotation = annotations
- // }
- // updatedListID, err := manifests.Annotate(ctx, names[0], names[1], manifestAnnotateOpts)
- // if err != nil {
- // return updatedListID, errors.Wrapf(err, "error annotating %s of manifest list %s", names[1], names[0])
- // }
- // return fmt.Sprintf("%s :%s", updatedListID, names[1]), nil
}
// ManifestRemove removes the digest from manifest list
func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (string, error) {
- updatedListID, err := manifests.Remove(ctx, names[0], names[1])
+ updatedListID, err := manifests.Remove(ir.ClientCxt, names[0], names[1])
if err != nil {
return updatedListID, errors.Wrapf(err, "error removing from manifest %s", names[0])
}
@@ -105,6 +80,6 @@ func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (stri
// ManifestPush pushes a manifest list or image index to the destination
func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts entities.ManifestPushOptions) error {
- _, err := manifests.Push(ctx, names[0], &names[1], &opts.All)
+ _, err := manifests.Push(ir.ClientCxt, names[0], &names[1], &opts.All)
return err
}
diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go
index 4ff72dcfc..7725d8257 100644
--- a/pkg/domain/infra/tunnel/network.go
+++ b/pkg/domain/infra/tunnel/network.go
@@ -2,22 +2,39 @@ package tunnel
import (
"context"
- "errors"
+ "github.com/containers/libpod/pkg/bindings/network"
"github.com/containers/libpod/pkg/domain/entities"
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
- return nil, errors.New("not implemented")
+ return network.List(ic.ClientCxt)
}
func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) {
- return nil, errors.New("not implemented")
+ var reports []entities.NetworkInspectReport
+ for _, name := range namesOrIds {
+ report, err := network.Inspect(ic.ClientCxt, name)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, report...)
+ }
+ return reports, nil
}
+
func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
- return nil, errors.New("not implemented")
+ var reports []*entities.NetworkRmReport
+ for _, name := range namesOrIds {
+ report, err := network.Remove(ic.ClientCxt, name, &options.Force)
+ if err != nil {
+ report[0].Err = err
+ }
+ reports = append(reports, report...)
+ }
+ return reports, nil
}
func (ic *ContainerEngine) NetworkCreate(ctx context.Context, name string, options entities.NetworkCreateOptions) (*entities.NetworkCreateReport, error) {
- return nil, errors.New("not implemented")
+ return network.Create(ic.ClientCxt, options, &name)
}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
index c111f99e9..357e2c390 100644
--- a/pkg/domain/infra/tunnel/runtime.go
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -13,3 +13,8 @@ type ImageEngine struct {
type ContainerEngine struct {
ClientCxt context.Context
}
+
+// System-related runtime using an ssh-tunnel to utilize Podman service
+type SystemEngine struct {
+ ClientCxt context.Context
+}
diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go
index dafada805..829af31f6 100644
--- a/pkg/domain/infra/tunnel/system.go
+++ b/pkg/domain/infra/tunnel/system.go
@@ -27,8 +27,13 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
return system.Prune(ic.ClientCxt, &options.All, &options.Volume)
}
+// Reset removes all storage
+func (ic *SystemEngine) Reset(ctx context.Context) error {
+ return system.Reset(ic.ClientCxt)
+}
+
func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) {
- panic(errors.New("system df is not supported on remote clients"))
+ return system.DiskUsage(ic.ClientCxt)
}
func (ic *ContainerEngine) Unshare(ctx context.Context, args []string) error {
diff --git a/pkg/network/devices.go b/pkg/network/devices.go
index 78e1a5aa5..8eac32142 100644
--- a/pkg/network/devices.go
+++ b/pkg/network/devices.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os/exec"
+ "github.com/containers/common/pkg/config"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
"github.com/sirupsen/logrus"
@@ -11,12 +12,12 @@ import (
// GetFreeDeviceName returns a device name that is unused; used when no network
// name is provided by user
-func GetFreeDeviceName() (string, error) {
+func GetFreeDeviceName(config *config.Config) (string, error) {
var (
deviceNum uint
deviceName string
)
- networkNames, err := GetNetworkNamesFromFileSystem()
+ networkNames, err := GetNetworkNamesFromFileSystem(config)
if err != nil {
return "", err
}
@@ -24,7 +25,7 @@ func GetFreeDeviceName() (string, error) {
if err != nil {
return "", err
}
- bridgeNames, err := GetBridgeNamesFromFileSystem()
+ bridgeNames, err := GetBridgeNamesFromFileSystem(config)
if err != nil {
return "", err
}
diff --git a/pkg/network/files.go b/pkg/network/files.go
index 116189c43..81c0e1a28 100644
--- a/pkg/network/files.go
+++ b/pkg/network/files.go
@@ -9,9 +9,17 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
+ "github.com/containers/common/pkg/config"
"github.com/pkg/errors"
)
+func GetCNIConfDir(config *config.Config) string {
+ if len(config.Network.NetworkConfigDir) < 1 {
+ return CNIConfigDir
+ }
+ return config.Network.NetworkConfigDir
+}
+
// LoadCNIConfsFromDir loads all the CNI configurations from a dir
func LoadCNIConfsFromDir(dir string) ([]*libcni.NetworkConfigList, error) {
var configs []*libcni.NetworkConfigList
@@ -33,8 +41,8 @@ func LoadCNIConfsFromDir(dir string) ([]*libcni.NetworkConfigList, error) {
// GetCNIConfigPathByName finds a CNI network by name and
// returns its configuration file path
-func GetCNIConfigPathByName(name string) (string, error) {
- files, err := libcni.ConfFiles(CNIConfigDir, []string{".conflist"})
+func GetCNIConfigPathByName(config *config.Config, name string) (string, error) {
+ files, err := libcni.ConfFiles(GetCNIConfDir(config), []string{".conflist"})
if err != nil {
return "", err
}
@@ -52,8 +60,8 @@ func GetCNIConfigPathByName(name string) (string, error) {
// ReadRawCNIConfByName reads the raw CNI configuration for a CNI
// network by name
-func ReadRawCNIConfByName(name string) ([]byte, error) {
- confFile, err := GetCNIConfigPathByName(name)
+func ReadRawCNIConfByName(config *config.Config, name string) ([]byte, error) {
+ confFile, err := GetCNIConfigPathByName(config, name)
if err != nil {
return nil, err
}
@@ -73,9 +81,10 @@ func GetCNIPlugins(list *libcni.NetworkConfigList) string {
// GetNetworksFromFilesystem gets all the networks from the cni configuration
// files
-func GetNetworksFromFilesystem() ([]*allocator.Net, error) {
+func GetNetworksFromFilesystem(config *config.Config) ([]*allocator.Net, error) {
var cniNetworks []*allocator.Net
- networks, err := LoadCNIConfsFromDir(CNIConfigDir)
+
+ networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config))
if err != nil {
return nil, err
}
@@ -96,9 +105,10 @@ func GetNetworksFromFilesystem() ([]*allocator.Net, error) {
// GetNetworkNamesFromFileSystem gets all the names from the cni network
// configuration files
-func GetNetworkNamesFromFileSystem() ([]string, error) {
+func GetNetworkNamesFromFileSystem(config *config.Config) ([]string, error) {
var networkNames []string
- networks, err := LoadCNIConfsFromDir(CNIConfigDir)
+
+ networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config))
if err != nil {
return nil, err
}
@@ -133,9 +143,10 @@ func GetInterfaceNameFromConfig(path string) (string, error) {
// GetBridgeNamesFromFileSystem is a convenience function to get all the bridge
// names from the configured networks
-func GetBridgeNamesFromFileSystem() ([]string, error) {
+func GetBridgeNamesFromFileSystem(config *config.Config) ([]string, error) {
var bridgeNames []string
- networks, err := LoadCNIConfsFromDir(CNIConfigDir)
+
+ networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config))
if err != nil {
return nil, err
}
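
Every helper in this file now threads a *config.Config through so the CNI configuration directory can come from containers.conf, with GetCNIConfDir falling back to the built-in CNIConfigDir when Network.NetworkConfigDir is empty. A minimal sketch of that fallback, using a local struct and constant as stand-ins for the real config types:

package main

import "fmt"

// cniConfigDir stands in for the package-level CNIConfigDir default.
const cniConfigDir = "/etc/cni/net.d"

// networkConfig is a minimal stand-in for config.Config.Network.
type networkConfig struct {
	NetworkConfigDir string
}

// getCNIConfDir mirrors GetCNIConfDir above: prefer the configured
// directory, fall back to the default when it is empty.
func getCNIConfDir(cfg *networkConfig) string {
	if len(cfg.NetworkConfigDir) < 1 {
		return cniConfigDir
	}
	return cfg.NetworkConfigDir
}

func main() {
	fmt.Println(getCNIConfDir(&networkConfig{}))                               // default
	fmt.Println(getCNIConfDir(&networkConfig{NetworkConfigDir: "/tmp/cni.d"})) // override
}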
diff --git a/pkg/network/network.go b/pkg/network/network.go
index bb6f13579..5e9062019 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -7,6 +7,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
+ "github.com/containers/common/pkg/config"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -56,8 +57,8 @@ func GetLiveNetworkNames() ([]string, error) {
// GetFreeNetwork looks for a free network according to existing cni configuration
// files and network interfaces.
-func GetFreeNetwork() (*net.IPNet, error) {
- networks, err := GetNetworksFromFilesystem()
+func GetFreeNetwork(config *config.Config) (*net.IPNet, error) {
+ networks, err := GetNetworksFromFilesystem(config)
if err != nil {
return nil, err
}
@@ -131,8 +132,8 @@ func networkIntersect(n1, n2 *net.IPNet) bool {
// ValidateUserNetworkIsAvailable returns via an error if a network is available
// to be used
-func ValidateUserNetworkIsAvailable(userNet *net.IPNet) error {
- networks, err := GetNetworksFromFilesystem()
+func ValidateUserNetworkIsAvailable(config *config.Config, userNet *net.IPNet) error {
+ networks, err := GetNetworksFromFilesystem(config)
if err != nil {
return err
}
@@ -153,8 +154,8 @@ func ValidateUserNetworkIsAvailable(userNet *net.IPNet) error {
// RemoveNetwork removes a given network by name. If the network has container associated with it, that
// must be handled outside the context of this.
-func RemoveNetwork(name string) error {
- cniPath, err := GetCNIConfigPathByName(name)
+func RemoveNetwork(config *config.Config, name string) error {
+ cniPath, err := GetCNIConfigPathByName(config, name)
if err != nil {
return err
}
@@ -181,8 +182,8 @@ func RemoveNetwork(name string) error {
}
// InspectNetwork reads a CNI config and returns its configuration
-func InspectNetwork(name string) (map[string]interface{}, error) {
- b, err := ReadRawCNIConfByName(name)
+func InspectNetwork(config *config.Config, name string) (map[string]interface{}, error) {
+ b, err := ReadRawCNIConfByName(config, name)
if err != nil {
return nil, err
}
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index f3aaf96bf..ffd7fd4dd 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -111,7 +111,8 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if err != nil {
return nil, err
}
- options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
+ // TODO: Enable syslog support - we'll need to put this in SpecGen.
+ options = append(options, libpod.WithExitCommand(CreateExitCommandArgs(rt.StorageConfig(), rtc, podmanPath, false, s.Remove, false)))
runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts)
if err != nil {
@@ -228,7 +229,7 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
return options, nil
}
-func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
+func CreateExitCommandArgs(storageConfig storage.StoreOptions, config *config.Config, podmanPath string, syslog, rm bool, exec bool) []string {
// We need a cleanup process for containers in the current model.
// But we can't assume that the caller is Podman - it could be another
// user of the API.
@@ -255,14 +256,18 @@ func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.Sto
command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
}
- // TODO Mheon wants to leave this for now
- //if s.sys {
- // command = append(command, "--syslog", "true")
- //}
+ if syslog {
+ command = append(command, "--syslog", "true")
+ }
command = append(command, []string{"container", "cleanup"}...)
- if s.Remove {
+ if rm {
command = append(command, "--rm")
}
- return libpod.WithExitCommand(command)
+
+ if exec {
+ command = append(command, "--exec")
+ }
+
+ return command
}
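
The unexported createExitCommandOption becomes the exported CreateExitCommandArgs so other callers (for example the exec cleanup path) can build the same cleanup command, and the option is now applied at the call site via libpod.WithExitCommand, as the MakeContainer hunk above shows. A small standalone sketch of the flag-appending tail of that builder, using a plain string slice in place of the real storage and engine config:

package main

import (
	"fmt"
	"strings"
)

// exitCommandTail mirrors the tail of CreateExitCommandArgs above:
// optional --syslog, the "container cleanup" subcommand, then the
// optional --rm and --exec flags.
func exitCommandTail(base []string, syslog, rm, exec bool) []string {
	command := append([]string{}, base...)
	if syslog {
		command = append(command, "--syslog", "true")
	}
	command = append(command, "container", "cleanup")
	if rm {
		command = append(command, "--rm")
	}
	if exec {
		command = append(command, "--exec")
	}
	return command
}

func main() {
	args := exitCommandTail([]string{"/usr/bin/podman", "--root", "/var/lib/containers/storage"}, false, true, false)
	fmt.Println(strings.Join(args, " "))
	// /usr/bin/podman --root /var/lib/containers/storage container cleanup --rm
}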
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
index 11dee1986..da1f8e8fc 100644
--- a/pkg/specgen/namespaces.go
+++ b/pkg/specgen/namespaces.go
@@ -40,6 +40,9 @@ const (
KeepID NamespaceMode = "keep-id"
// KeepId indicates to automatically create a user namespace
Auto NamespaceMode = "auto"
+ // DefaultKernelNamespaces is a comma-separated list of default kernel
+ // namespaces.
+ DefaultKernelNamespaces = "cgroup,ipc,net,uts"
)
// Namespace describes the namespace
diff --git a/pkg/util/mountOpts_linux.go b/pkg/util/mountOpts_linux.go
index 3eac4dd25..bc7c675f3 100644
--- a/pkg/util/mountOpts_linux.go
+++ b/pkg/util/mountOpts_linux.go
@@ -7,7 +7,7 @@ import (
)
func getDefaultMountOptions(path string) (defaultMountOptions, error) {
- opts := defaultMountOptions{true, true, true}
+ opts := defaultMountOptions{false, true, true}
if path == "" {
return opts, nil
}
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 258cb8652..291353cad 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -901,12 +901,12 @@ func (i *VarlinkAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.Exec
// HealthCheckRun executes defined container's healthcheck command and returns the container's health status.
func (i *VarlinkAPI) HealthCheckRun(call iopodman.VarlinkCall, nameOrID string) error {
hcStatus, err := i.Runtime.HealthCheck(nameOrID)
- if err != nil && hcStatus != libpod.HealthCheckFailure {
+ if err != nil && hcStatus != define.HealthCheckFailure {
return call.ReplyErrorOccurred(err.Error())
}
- status := libpod.HealthCheckUnhealthy
- if hcStatus == libpod.HealthCheckSuccess {
- status = libpod.HealthCheckHealthy
+ status := define.HealthCheckUnhealthy
+ if hcStatus == define.HealthCheckSuccess {
+ status = define.HealthCheckHealthy
}
return call.ReplyHealthCheckRun(status)
}
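
The handler above now takes its healthcheck status constants from libpod/define; the mapping itself is simple: only a successful check run reports healthy, anything else is unhealthy. A standalone sketch with local stand-ins for the define constants:

package main

import "fmt"

// Stand-in values for define.HealthCheckSuccess and the healthy and
// unhealthy status strings; the real values live in libpod/define.
const (
	healthCheckSuccess = 0
	healthy            = "healthy"
	unhealthy          = "unhealthy"
)

// healthStatus mirrors the branch above: only a successful check run
// maps to "healthy".
func healthStatus(hcStatus int) string {
	if hcStatus == healthCheckSuccess {
		return healthy
	}
	return unhealthy
}

func main() {
	fmt.Println(healthStatus(healthCheckSuccess)) // healthy
	fmt.Println(healthStatus(1))                  // unhealthy
}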
diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go
index 82efe9b5d..308f02274 100644
--- a/pkg/varlinkapi/system.go
+++ b/pkg/varlinkapi/system.go
@@ -28,7 +28,7 @@ func (i *VarlinkAPI) GetVersion(call iopodman.VarlinkCall) error {
versionInfo.GitCommit,
time.Unix(versionInfo.Built, 0).Format(time.RFC3339),
versionInfo.OsArch,
- versionInfo.RemoteAPIVersion,
+ versionInfo.APIVersion,
)
}
diff --git a/pkg/varlinkapi/transfers.go b/pkg/varlinkapi/transfers.go
index 9df8ffcdc..aed6e054d 100644
--- a/pkg/varlinkapi/transfers.go
+++ b/pkg/varlinkapi/transfers.go
@@ -39,7 +39,7 @@ func (i *VarlinkAPI) SendFile(call iopodman.VarlinkCall, ftype string, length in
logrus.Debugf("successfully received %s", outputFile.Name())
// Send an ACK to the client
- call.Call.Writer.WriteString(outputFile.Name())
+ call.Call.Writer.WriteString(outputFile.Name() + ":")
call.Call.Writer.Flush()
return nil
diff --git a/test/apiv2/01-basic.at b/test/apiv2/01-basic.at
index 0e94ddb7a..18ec9bbe8 100644
--- a/test/apiv2/01-basic.at
+++ b/test/apiv2/01-basic.at
@@ -10,13 +10,13 @@ t HEAD /_ping 200
t GET /libpod/_ping 200 OK
for i in /version version; do
- t GET $i 200 \
- .Components[0].Name="Podman Engine" \
- .Components[0].Details.APIVersion=1.40 \
- .Components[0].Details.MinAPIVersion=1.24 \
- .Components[0].Details.Os=linux \
- .ApiVersion=1.40 \
- .MinAPIVersion=1.24 \
+ t GET $i 200 \
+ .Components[0].Name="Podman Engine" \
+ .Components[0].Details.APIVersion=1.0.0 \
+ .Components[0].Details.MinAPIVersion=1.0.0 \
+ .Components[0].Details.Os=linux \
+ .ApiVersion=1.0.0 \
+ .MinAPIVersion=1.0.0 \
.Os=linux
done
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 68f733b41..80ee83f44 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -35,7 +35,7 @@ var (
INTEGRATION_ROOT string
CGROUP_MANAGER = "systemd"
ARTIFACT_DIR = "/tmp/.artifacts"
- RESTORE_IMAGES = []string{ALPINE, BB}
+ RESTORE_IMAGES = []string{ALPINE, BB, nginx}
defaultWaitTimeout = 90
)
@@ -53,6 +53,7 @@ type PodmanTestIntegration struct {
Host HostOS
Timings []string
TmpDir string
+ RemoteStartErr error
}
var LockTmpDir string
@@ -259,12 +260,12 @@ func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration {
p.PodmanTest.RemotePodmanBinary = podmanRemoteBinary
uuid := stringid.GenerateNonCryptoID()
if !rootless.IsRootless() {
- p.VarlinkEndpoint = fmt.Sprintf("unix:/run/podman/io.podman-%s", uuid)
+ p.RemoteSocket = fmt.Sprintf("unix:/run/podman/podman-%s.sock", uuid)
} else {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
- socket := fmt.Sprintf("io.podman-%s", uuid)
+ socket := fmt.Sprintf("podman-%s.sock", uuid)
fqpath := filepath.Join(runtimeDir, socket)
- p.VarlinkEndpoint = fmt.Sprintf("unix:%s", fqpath)
+ p.RemoteSocket = fmt.Sprintf("unix:%s", fqpath)
}
}
@@ -441,7 +442,7 @@ func (p *PodmanTestIntegration) Cleanup() {
session := p.Podman([]string{"rm", "-fa"})
session.Wait(90)
- p.StopVarlink()
+ p.StopRemoteService()
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
@@ -451,17 +452,6 @@ func (p *PodmanTestIntegration) Cleanup() {
resetRegistriesConfigEnv()
}
-// CleanupPod cleans up the temporary store
-func (p *PodmanTestIntegration) CleanupPod() {
- // Remove all containers
- session := p.Podman([]string{"pod", "rm", "-fa"})
- session.Wait(90)
- // Nuke tempdir
- if err := os.RemoveAll(p.TempDir); err != nil {
- fmt.Printf("%q\n", err)
- }
-}
-
// CleanupVolume cleans up the temporary store
func (p *PodmanTestIntegration) CleanupVolume() {
// Remove all containers
diff --git a/test/e2e/config.go b/test/e2e/config.go
index 0e1850614..71c4dee31 100644
--- a/test/e2e/config.go
+++ b/test/e2e/config.go
@@ -27,4 +27,8 @@ var (
// v2fail is a temporary variable to help us track
// tests that fail in v2
v2fail = "does not pass integration tests with v2 podman"
+
+ // v2remotefail is a temporary variable to help us track
+ // tests that fail in v2 remote
+ v2remotefail = "does not pass integration tests with v2 podman remote"
)
diff --git a/test/e2e/container_inspect_test.go b/test/e2e/container_inspect_test.go
index 91c025197..9cbcbbc7c 100644
--- a/test/e2e/container_inspect_test.go
+++ b/test/e2e/container_inspect_test.go
@@ -27,7 +27,7 @@ var _ = Describe("Podman container inspect", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 1041b30bb..0a6373bfa 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -35,13 +35,13 @@ var _ = Describe("Podman create", func() {
})
It("podman create container based on a local image", func() {
- session := podmanTest.Podman([]string{"create", ALPINE, "ls"})
+ session := podmanTest.Podman([]string{"create", "--name", "local_image_test", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
cid := session.OutputToString()
Expect(session.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- check := podmanTest.Podman([]string{"inspect", "-l"})
+ check := podmanTest.Podman([]string{"inspect", "local_image_test"})
check.WaitWithDefaultTimeout()
data := check.InspectContainerToJSON()
Expect(data[0].ID).To(ContainSubstring(cid))
@@ -80,12 +80,12 @@ var _ = Describe("Podman create", func() {
})
It("podman create adds annotation", func() {
- session := podmanTest.Podman([]string{"create", "--annotation", "HELLO=WORLD", ALPINE, "ls"})
+ session := podmanTest.Podman([]string{"create", "--annotation", "HELLO=WORLD", "--name", "annotate_test", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- check := podmanTest.Podman([]string{"inspect", "-l"})
+ check := podmanTest.Podman([]string{"inspect", "annotate_test"})
check.WaitWithDefaultTimeout()
data := check.InspectContainerToJSON()
value, ok := data[0].Config.Annotations["HELLO"]
@@ -94,12 +94,12 @@ var _ = Describe("Podman create", func() {
})
It("podman create --entrypoint command", func() {
- session := podmanTest.Podman([]string{"create", "--entrypoint", "/bin/foobar", ALPINE})
+ session := podmanTest.Podman([]string{"create", "--name", "entrypoint_test", "--entrypoint", "/bin/foobar", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- result := podmanTest.Podman([]string{"inspect", "-l", "--format", "{{.Config.Entrypoint}}"})
+ result := podmanTest.Podman([]string{"inspect", "entrypoint_test", "--format", "{{.Config.Entrypoint}}"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
Expect(result.OutputToString()).To(Equal("/bin/foobar"))
@@ -119,18 +119,19 @@ var _ = Describe("Podman create", func() {
It("podman create --entrypoint json", func() {
jsonString := `[ "/bin/foo", "-c"]`
- session := podmanTest.Podman([]string{"create", "--entrypoint", jsonString, ALPINE})
+ session := podmanTest.Podman([]string{"create", "--name", "entrypoint_json", "--entrypoint", jsonString, ALPINE})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- result := podmanTest.Podman([]string{"inspect", "-l", "--format", "{{.Config.Entrypoint}}"})
+ result := podmanTest.Podman([]string{"inspect", "entrypoint_json", "--format", "{{.Config.Entrypoint}}"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
Expect(result.OutputToString()).To(Equal("/bin/foo -c"))
})
It("podman create --mount flag with multiple mounts", func() {
+ Skip(v2remotefail)
vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
err := os.MkdirAll(vol1, 0755)
Expect(err).To(BeNil())
@@ -156,6 +157,7 @@ var _ = Describe("Podman create", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("skip failing test on ppc64le")
}
+ Skip(v2remotefail)
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
os.Mkdir(mountPath, 0755)
session := podmanTest.Podman([]string{"create", "--name", "test", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
@@ -205,7 +207,7 @@ var _ = Describe("Podman create", func() {
session = podmanTest.Podman([]string{"logs", "test_tmpfs"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(session.OutputToString()).To(ContainSubstring("/create/test rw,nosuid,nodev,noexec,relatime - tmpfs"))
+ Expect(session.OutputToString()).To(ContainSubstring("/create/test rw,nosuid,nodev,relatime - tmpfs"))
})
It("podman create --pod automatically", func() {
@@ -233,11 +235,11 @@ var _ = Describe("Podman create", func() {
})
It("podman create --pull", func() {
- session := podmanTest.PodmanNoCache([]string{"create", "--pull", "never", "--name=foo", "nginx"})
+ session := podmanTest.PodmanNoCache([]string{"create", "--pull", "never", "--name=foo", "debian"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
- session = podmanTest.PodmanNoCache([]string{"create", "--pull", "always", "--name=foo", "nginx"})
+ session = podmanTest.PodmanNoCache([]string{"create", "--pull", "always", "--name=foo", "debian"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index 4cd5de05e..460554b77 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -19,7 +19,6 @@ var _ = Describe("Podman events", func() {
)
BeforeEach(func() {
- SkipIfRootlessV2()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -119,6 +118,7 @@ var _ = Describe("Podman events", func() {
})
It("podman events format", func() {
+ Skip(v2remotefail)
info := GetHostDistributionInfo()
if info.Distribution != "fedora" {
Skip("need to verify images have correct packages for journald")
@@ -138,5 +138,19 @@ var _ = Describe("Podman events", func() {
_, exist := eventsMap["Status"]
Expect(exist).To(BeTrue())
Expect(test.ExitCode()).To(BeZero())
+
+ test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"})
+ test.WaitWithDefaultTimeout()
+ fmt.Println(test.OutputToStringArray())
+ jsonArr = test.OutputToStringArray()
+ Expect(len(jsonArr)).To(Not(BeZero()))
+ eventsMap = make(map[string]string)
+ err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
+ if err != nil {
+ os.Exit(1)
+ }
+ _, exist = eventsMap["Status"]
+ Expect(exist).To(BeTrue())
+ Expect(test.ExitCode()).To(BeZero())
})
})
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index 8b95794d2..87dddb233 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -18,6 +18,7 @@ var _ = Describe("Podman exec", func() {
)
BeforeEach(func() {
+ Skip(v2remotefail)
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -282,4 +283,31 @@ var _ = Describe("Podman exec", func() {
Expect(exec.ExitCode()).To(Equal(0))
Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue())
})
+
+ It("podman exec --detach", func() {
+ ctrName := "testctr"
+ ctr := podmanTest.Podman([]string{"run", "-t", "-i", "-d", "--name", ctrName, ALPINE, "top"})
+ ctr.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(Equal(0))
+
+ exec1 := podmanTest.Podman([]string{"exec", "-t", "-i", "-d", ctrName, "top"})
+ exec1.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(Equal(0))
+
+ data := podmanTest.InspectContainer(ctrName)
+ Expect(len(data)).To(Equal(1))
+ Expect(len(data[0].ExecIDs)).To(Equal(1))
+ Expect(strings.Contains(exec1.OutputToString(), data[0].ExecIDs[0])).To(BeTrue())
+
+ exec2 := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "ps", "-a"})
+ exec2.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(Equal(0))
+ Expect(strings.Count(exec2.OutputToString(), "top")).To(Equal(2))
+
+ // Ensure that stop with a running detached exec session is
+ // clean.
+ stop := podmanTest.Podman([]string{"stop", ctrName})
+ stop.WaitWithDefaultTimeout()
+ Expect(stop.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/export_test.go b/test/e2e/export_test.go
index 1c84c6f4d..fb2582796 100644
--- a/test/e2e/export_test.go
+++ b/test/e2e/export_test.go
@@ -34,7 +34,6 @@ var _ = Describe("Podman export", func() {
})
It("podman export output flag", func() {
- SkipIfRemote()
_, ec, cid := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
@@ -50,7 +49,6 @@ var _ = Describe("Podman export", func() {
})
It("podman container export output flag", func() {
- SkipIfRemote()
_, ec, cid := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index 19a8658ac..8e63d9f4c 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -176,6 +176,7 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman healthcheck single healthy result changes failed to healthy", func() {
+ Skip(v2remotefail)
session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go
index d7295b67a..1b23aba36 100644
--- a/test/e2e/images_test.go
+++ b/test/e2e/images_test.go
@@ -90,7 +90,7 @@ var _ = Describe("Podman images", func() {
session = podmanTest.PodmanNoCache([]string{"images", "-qn"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- Expect(len(session.OutputToStringArray())).To(BeNumerically("==", 2))
+ Expect(len(session.OutputToStringArray())).To(BeNumerically("==", 3))
})
It("podman images with digests", func() {
@@ -151,9 +151,7 @@ var _ = Describe("Podman images", func() {
})
It("podman images filter reference", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
podmanTest.RestoreAllArtifacts()
result := podmanTest.PodmanNoCache([]string{"images", "-q", "-f", "reference=docker.io*"})
result.WaitWithDefaultTimeout()
@@ -163,13 +161,13 @@ var _ = Describe("Podman images", func() {
retapline := podmanTest.PodmanNoCache([]string{"images", "-f", "reference=a*pine"})
retapline.WaitWithDefaultTimeout()
Expect(retapline).Should(Exit(0))
- Expect(len(retapline.OutputToStringArray())).To(Equal(2))
+ Expect(len(retapline.OutputToStringArray())).To(Equal(3))
Expect(retapline.LineInOutputContains("alpine")).To(BeTrue())
retapline = podmanTest.PodmanNoCache([]string{"images", "-f", "reference=alpine"})
retapline.WaitWithDefaultTimeout()
Expect(retapline).Should(Exit(0))
- Expect(len(retapline.OutputToStringArray())).To(Equal(2))
+ Expect(len(retapline.OutputToStringArray())).To(Equal(3))
Expect(retapline.LineInOutputContains("alpine")).To(BeTrue())
retnone := podmanTest.PodmanNoCache([]string{"images", "-q", "-f", "reference=bogus"})
@@ -179,9 +177,7 @@ var _ = Describe("Podman images", func() {
})
It("podman images filter before image", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
dockerfile := `FROM docker.io/library/alpine:latest
RUN apk update && apk add man
`
@@ -193,9 +189,7 @@ RUN apk update && apk add man
})
It("podman images filter after image", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
podmanTest.RestoreAllArtifacts()
rmi := podmanTest.PodmanNoCache([]string{"rmi", "busybox"})
rmi.WaitWithDefaultTimeout()
@@ -211,9 +205,7 @@ RUN apk update && apk add man
})
It("podman image list filter after image", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
podmanTest.RestoreAllArtifacts()
rmi := podmanTest.PodmanNoCache([]string{"image", "rm", "busybox"})
rmi.WaitWithDefaultTimeout()
@@ -229,9 +221,7 @@ RUN apk update && apk add man
})
It("podman images filter dangling", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
dockerfile := `FROM docker.io/library/alpine:latest
`
podmanTest.BuildImage(dockerfile, "foobar.com/before:latest", "false")
@@ -307,9 +297,7 @@ RUN apk update && apk add man
})
It("podman images --all flag", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
+ SkipIfRemote()
podmanTest.RestoreAllArtifacts()
dockerfile := `FROM docker.io/library/alpine:latest
RUN mkdir hello
@@ -320,12 +308,12 @@ ENV foo=bar
session := podmanTest.PodmanNoCache([]string{"images"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- Expect(len(session.OutputToStringArray())).To(Equal(4))
+ Expect(len(session.OutputToStringArray())).To(Equal(5))
session2 := podmanTest.PodmanNoCache([]string{"images", "--all"})
session2.WaitWithDefaultTimeout()
Expect(session2).Should(Exit(0))
- Expect(len(session2.OutputToStringArray())).To(Equal(6))
+ Expect(len(session2.OutputToStringArray())).To(Equal(7))
})
It("podman images filter by label", func() {
@@ -342,10 +330,7 @@ LABEL "com.example.vendor"="Example Vendor"
})
It("podman with images with no layers", func() {
- if podmanTest.RemoteTest {
- Skip("Does not work on remote client")
- }
-
+ SkipIfRemote()
dockerfile := strings.Join([]string{
`FROM scratch`,
`LABEL org.opencontainers.image.authors="<somefolks@example.org>"`,
diff --git a/test/e2e/info_test.go b/test/e2e/info_test.go
index 7cb299e0f..dbdaa05a7 100644
--- a/test/e2e/info_test.go
+++ b/test/e2e/info_test.go
@@ -33,7 +33,6 @@ var _ = Describe("Podman Info", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
-
})
It("podman info json output", func() {
diff --git a/test/e2e/init_test.go b/test/e2e/init_test.go
index 919fe4abf..721017d0c 100644
--- a/test/e2e/init_test.go
+++ b/test/e2e/init_test.go
@@ -75,6 +75,7 @@ var _ = Describe("Podman init", func() {
})
It("podman init latest container", func() {
+ SkipIfRemote()
session := podmanTest.Podman([]string{"create", "-d", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -119,10 +120,10 @@ var _ = Describe("Podman init", func() {
})
It("podman init running container errors", func() {
- session := podmanTest.Podman([]string{"run", "-d", ALPINE, "top"})
+ session := podmanTest.Podman([]string{"run", "--name", "init_test", "-d", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- init := podmanTest.Podman([]string{"init", "--latest"})
+ init := podmanTest.Podman([]string{"init", "init_test"})
init.WaitWithDefaultTimeout()
Expect(init.ExitCode()).To(Equal(125))
})
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index ebac087ac..77cfe4fd3 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -43,7 +43,6 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect bogus container", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"inspect", "foobar4321"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
@@ -67,7 +66,6 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container with GO format for ConmonPidFile", func() {
- SkipIfRemote()
session, ec, _ := podmanTest.RunLsContainer("test1")
Expect(ec).To(Equal(0))
@@ -77,11 +75,10 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container with size", func() {
- SkipIfRemote()
- _, ec, _ := podmanTest.RunLsContainer("")
+ _, ec, _ := podmanTest.RunLsContainer("sizetest")
Expect(ec).To(Equal(0))
- result := podmanTest.Podman([]string{"inspect", "--size", "-l"})
+ result := podmanTest.Podman([]string{"inspect", "--size", "sizetest"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
conData := result.InspectContainerToJSON()
@@ -90,7 +87,6 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container and image", func() {
- SkipIfRemote()
ls, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
@@ -102,7 +98,6 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container and filter for Image{ID}", func() {
- SkipIfRemote()
ls, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
@@ -119,7 +114,6 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container and filter for CreateCommand", func() {
- SkipIfRemote()
ls, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
diff --git a/test/e2e/kill_test.go b/test/e2e/kill_test.go
index 834f86b77..3f192fb55 100644
--- a/test/e2e/kill_test.go
+++ b/test/e2e/kill_test.go
@@ -100,6 +100,7 @@ var _ = Describe("Podman kill", func() {
})
It("podman kill latest container", func() {
+ SkipIfRemote()
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/libpod_suite_remote_test.go b/test/e2e/libpod_suite_remote_test.go
new file mode 100644
index 000000000..79d18115c
--- /dev/null
+++ b/test/e2e/libpod_suite_remote_test.go
@@ -0,0 +1,214 @@
+// +build remoteclient
+
+package integration
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/onsi/ginkgo"
+)
+
+func SkipIfRemote() {
+ ginkgo.Skip("This function is not enabled for remote podman")
+}
+
+func SkipIfRootless() {
+ if os.Geteuid() != 0 {
+ ginkgo.Skip("This function is not enabled for rootless podman")
+ }
+}
+func SkipIfRootlessV2() {
+ if os.Geteuid() != 0 {
+ ginkgo.Skip("This function is not enabled for v2 rootless podman")
+ }
+}
+
+// Podman is the exec call to podman on the filesystem
+func (p *PodmanTestIntegration) Podman(args []string) *PodmanSessionIntegration {
+ var remoteArgs = []string{"--remote", p.RemoteSocket}
+ remoteArgs = append(remoteArgs, args...)
+ podmanSession := p.PodmanBase(remoteArgs, false, false)
+ return &PodmanSessionIntegration{podmanSession}
+}
+
+// PodmanExtraFiles is the exec call to podman on the filesystem and passes down extra files
+func (p *PodmanTestIntegration) PodmanExtraFiles(args []string, extraFiles []*os.File) *PodmanSessionIntegration {
+ var remoteArgs = []string{"--remote", p.RemoteSocket}
+ remoteArgs = append(remoteArgs, args...)
+ podmanSession := p.PodmanAsUserBase(remoteArgs, 0, 0, "", nil, false, false, extraFiles)
+ return &PodmanSessionIntegration{podmanSession}
+}
+
+// PodmanNoCache calls podman without adding the imagecache
+func (p *PodmanTestIntegration) PodmanNoCache(args []string) *PodmanSessionIntegration {
+ var remoteArgs = []string{"--remote", p.RemoteSocket}
+ remoteArgs = append(remoteArgs, args...)
+ podmanSession := p.PodmanBase(remoteArgs, false, true)
+ return &PodmanSessionIntegration{podmanSession}
+}
+
+// PodmanNoEvents calls the Podman command without an imagecache and without an
+// events backend. It is used mostly for caching and uncaching images.
+func (p *PodmanTestIntegration) PodmanNoEvents(args []string) *PodmanSessionIntegration {
+ podmanSession := p.PodmanBase(args, true, true)
+ return &PodmanSessionIntegration{podmanSession}
+}
+
+func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
+ defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
+ os.Setenv("REGISTRIES_CONFIG_PATH", defaultFile)
+}
+
+func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
+ outfile := filepath.Join(p.TempDir, "registries.conf")
+ os.Setenv("REGISTRIES_CONFIG_PATH", outfile)
+ ioutil.WriteFile(outfile, b, 0644)
+}
+
+func resetRegistriesConfigEnv() {
+ os.Setenv("REGISTRIES_CONFIG_PATH", "")
+}
+func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
+ pti := PodmanTestCreateUtil(tempDir, true)
+ pti.StartRemoteService()
+ return pti
+}
+
+func (p *PodmanTestIntegration) StartRemoteService() {
+ if os.Geteuid() == 0 {
+ os.MkdirAll("/run/podman", 0755)
+ }
+ remoteSocket := p.RemoteSocket
+ args := []string{"system", "service", "--timeout", "0", remoteSocket}
+ podmanOptions := getRemoteOptions(p, args)
+ command := exec.Command(p.PodmanBinary, podmanOptions...)
+ fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
+ command.Start()
+ command.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+ p.RemoteCommand = command
+ p.RemoteSession = command.Process
+ err := p.DelayForService()
+ p.RemoteStartErr = err
+}
+
+func (p *PodmanTestIntegration) StopRemoteService() {
+ var out bytes.Buffer
+ var pids []int
+ remoteSession := p.RemoteSession
+
+ if !rootless.IsRootless() {
+ if err := remoteSession.Kill(); err != nil {
+ fmt.Fprintf(os.Stderr, "error on remote stop-kill %q", err)
+ }
+ if _, err := remoteSession.Wait(); err != nil {
+ fmt.Fprintf(os.Stderr, "error on remote stop-wait %q", err)
+ }
+
+ } else {
+ //p.ResetVarlinkAddress()
+ parentPid := fmt.Sprintf("%d", p.RemoteSession.Pid)
+ pgrep := exec.Command("pgrep", "-P", parentPid)
+ fmt.Printf("running: pgrep %s\n", parentPid)
+ pgrep.Stdout = &out
+ err := pgrep.Run()
+ if err != nil {
+ fmt.Fprint(os.Stderr, "unable to find remote pid")
+ }
+
+ for _, s := range strings.Split(out.String(), "\n") {
+ if len(s) == 0 {
+ continue
+ }
+ p, err := strconv.Atoi(s)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to convert %s to int", s)
+ }
+ if p != 0 {
+ pids = append(pids, p)
+ }
+ }
+
+ pids = append(pids, p.RemoteSession.Pid)
+ for _, pid := range pids {
+ syscall.Kill(pid, syscall.SIGKILL)
+ }
+ }
+ socket := strings.Split(p.RemoteSocket, ":")[1]
+ if err := os.Remove(socket); err != nil {
+ fmt.Println(err)
+ }
+}
+
+// makeOptions assembles all the podman main options
+func (p *PodmanTestIntegration) makeOptions(args []string, noEvents, noCache bool) []string {
+ return args
+}
+
+// getRemoteOptions assembles all the podman main options
+func getRemoteOptions(p *PodmanTestIntegration, args []string) []string {
+ podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s",
+ p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager), " ")
+ if os.Getenv("HOOK_OPTION") != "" {
+ podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
+ }
+ podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
+ podmanOptions = append(podmanOptions, args...)
+ return podmanOptions
+}
+
+func (p *PodmanTestIntegration) RestoreArtifactToCache(image string) error {
+ fmt.Printf("Restoring %s...\n", image)
+ dest := strings.Split(image, "/")
+ destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
+ p.CrioRoot = p.ImageCacheDir
+ restore := p.PodmanNoEvents([]string{"load", "-q", "-i", destName})
+ restore.WaitWithDefaultTimeout()
+ return nil
+}
+
+// SeedImages restores all the artifacts into the main store for remote tests
+func (p *PodmanTestIntegration) SeedImages() error {
+ return p.RestoreAllArtifacts()
+}
+
+// RestoreArtifact puts the cached image into our test store
+func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
+ fmt.Printf("Restoring %s...\n", image)
+ dest := strings.Split(image, "/")
+ destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
+ args := []string{"load", "-q", "-i", destName}
+ podmanOptions := getRemoteOptions(p, args)
+ command := exec.Command(p.PodmanBinary, podmanOptions...)
+ fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
+ command.Start()
+ command.Wait()
+ return nil
+}
+
+func (p *PodmanTestIntegration) DelayForService() error {
+ for i := 0; i < 5; i++ {
+ session := p.Podman([]string{"info"})
+ session.WaitWithDefaultTimeout()
+ if session.ExitCode() == 0 {
+ return nil
+ } else if i == 4 {
+ break
+ }
+ time.Sleep(2 * time.Second)
+ }
+ return errors.New("Service not detected")
+}
+
+func populateCache(podman *PodmanTestIntegration) {}
+func removeCache() {}
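
The new remote suite starts a podman API service per test run and polls it with DelayForService before any test talks to the socket. A generic standalone version of that retry loop; the probe closure here is a placeholder for the "podman info" call the suite actually issues:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForService mirrors DelayForService above: probe up to five times,
// two seconds apart, and report failure if the service never answers.
func waitForService(probe func() bool) error {
	for i := 0; i < 5; i++ {
		if probe() {
			return nil
		}
		if i == 4 {
			break
		}
		time.Sleep(2 * time.Second)
	}
	return errors.New("service not detected")
}

func main() {
	attempts := 0
	err := waitForService(func() bool {
		attempts++
		return attempts >= 2 // pretend the service answers on the second probe
	})
	fmt.Println("ready:", err == nil, "after", attempts, "probes")
}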
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index d26f5784d..009f70914 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -127,8 +127,8 @@ func (p *PodmanTestIntegration) RestoreArtifactToCache(image string) error {
return nil
}
-func (p *PodmanTestIntegration) StopVarlink() {}
-func (p *PodmanTestIntegration) DelayForVarlink() {}
+func (p *PodmanTestIntegration) StopRemoteService() {}
+func (p *PodmanTestIntegration) DelayForVarlink() {}
func populateCache(podman *PodmanTestIntegration) {
for _, image := range CACHE_IMAGES {
@@ -151,5 +151,5 @@ func (p *PodmanTestIntegration) SeedImages() error {
}
// We don't support running Varlink when local
-func (p *PodmanTestIntegration) StartVarlink() {
+func (p *PodmanTestIntegration) StartRemoteService() {
}
diff --git a/test/e2e/libpod_suite_remoteclient_test.go b/test/e2e/libpod_suite_varlink_test.go
index b5da041ab..cbaed71cc 100644
--- a/test/e2e/libpod_suite_remoteclient_test.go
+++ b/test/e2e/libpod_suite_varlink_test.go
@@ -1,11 +1,10 @@
-// +build remoteclient
+// +build remoteclientvarlink
package integration
import (
"bytes"
"fmt"
- "github.com/containers/libpod/pkg/rootless"
"io/ioutil"
"os"
"os/exec"
@@ -15,6 +14,8 @@ import (
"syscall"
"time"
+ "github.com/containers/libpod/pkg/rootless"
+
"github.com/onsi/ginkgo"
)
@@ -69,24 +70,24 @@ func resetRegistriesConfigEnv() {
}
func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
pti := PodmanTestCreateUtil(tempDir, true)
- pti.StartVarlink()
+ pti.StartRemoteService()
return pti
}
func (p *PodmanTestIntegration) ResetVarlinkAddress() {
- os.Unsetenv("PODMAN_VARLINK_ADDRESS")
+ //os.Unsetenv("PODMAN_VARLINK_ADDRESS")
}
func (p *PodmanTestIntegration) SetVarlinkAddress(addr string) {
- os.Setenv("PODMAN_VARLINK_ADDRESS", addr)
+ //os.Setenv("PODMAN_VARLINK_ADDRESS", addr)
}
func (p *PodmanTestIntegration) StartVarlink() {
if os.Geteuid() == 0 {
os.MkdirAll("/run/podman", 0755)
}
- varlinkEndpoint := p.VarlinkEndpoint
- p.SetVarlinkAddress(p.VarlinkEndpoint)
+ varlinkEndpoint := p.RemoteSocket
+ p.SetVarlinkAddress(p.RemoteSocket)
args := []string{"varlink", "--timeout", "0", varlinkEndpoint}
podmanOptions := getVarlinkOptions(p, args)
@@ -94,15 +95,15 @@ func (p *PodmanTestIntegration) StartVarlink() {
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
command.Start()
command.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
- p.VarlinkCommand = command
- p.VarlinkSession = command.Process
- p.DelayForVarlink()
+ p.RemoteCommand = command
+ p.RemoteSession = command.Process
+ p.DelayForService()
}
func (p *PodmanTestIntegration) StopVarlink() {
var out bytes.Buffer
var pids []int
- varlinkSession := p.VarlinkSession
+ varlinkSession := p.RemoteSession
if !rootless.IsRootless() {
if err := varlinkSession.Kill(); err != nil {
@@ -114,7 +115,7 @@ func (p *PodmanTestIntegration) StopVarlink() {
} else {
p.ResetVarlinkAddress()
- parentPid := fmt.Sprintf("%d", p.VarlinkSession.Pid)
+ parentPid := fmt.Sprintf("%d", p.RemoteSession.Pid)
pgrep := exec.Command("pgrep", "-P", parentPid)
fmt.Printf("running: pgrep %s\n", parentPid)
pgrep.Stdout = &out
@@ -136,12 +137,12 @@ func (p *PodmanTestIntegration) StopVarlink() {
}
}
- pids = append(pids, p.VarlinkSession.Pid)
+ pids = append(pids, p.RemoteSession.Pid)
for _, pid := range pids {
syscall.Kill(pid, syscall.SIGKILL)
}
}
- socket := strings.Split(p.VarlinkEndpoint, ":")[1]
+ socket := strings.Split(p.RemoteSocket, ":")[1]
if err := os.Remove(socket); err != nil {
fmt.Println(err)
}
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 0438a31cb..8924db670 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -19,6 +19,7 @@ var _ = Describe("Podman logs", func() {
)
BeforeEach(func() {
+ SkipIfRemote() // v2remotefail
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -172,6 +173,24 @@ var _ = Describe("Podman logs", func() {
Expect(string(out)).To(ContainSubstring("alpine"))
})
+ It("podman journald logs for container name", func() {
+ Skip("need to verify images have correct packages for journald")
+ containerName := "inside-journal"
+ logc := podmanTest.Podman([]string{"run", "--log-driver", "journald", "-d", "--name", containerName, ALPINE, "sh", "-c", "echo podman; sleep 0.1; echo podman; sleep 0.1; echo podman"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc.ExitCode()).To(Equal(0))
+ cid := logc.OutputToString()
+
+ wait := podmanTest.Podman([]string{"wait", "-l"})
+ wait.WaitWithDefaultTimeout()
+ Expect(wait.ExitCode()).To(BeZero())
+
+ cmd := exec.Command("journalctl", "--no-pager", "-o", "json", "--output-fields=CONTAINER_NAME", "-u", fmt.Sprintf("libpod-conmon-%s.scope", cid))
+ out, err := cmd.CombinedOutput()
+ Expect(err).To(BeNil())
+ Expect(string(out)).To(ContainSubstring(containerName))
+ })
+
It("podman journald logs for container", func() {
Skip("need to verify images have correct packages for journald")
logc := podmanTest.Podman([]string{"run", "--log-driver", "journald", "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
diff --git a/test/e2e/manifest_test.go b/test/e2e/manifest_test.go
index f622bf042..c47e20276 100644
--- a/test/e2e/manifest_test.go
+++ b/test/e2e/manifest_test.go
@@ -102,6 +102,7 @@ var _ = Describe("Podman manifest", func() {
})
It("podman manifest annotate", func() {
+ SkipIfRemote()
session := podmanTest.Podman([]string{"manifest", "create", "foo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -154,6 +155,7 @@ var _ = Describe("Podman manifest", func() {
})
It("podman manifest push", func() {
+ Skip(v2remotefail)
session := podmanTest.Podman([]string{"manifest", "create", "foo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -183,6 +185,7 @@ var _ = Describe("Podman manifest", func() {
})
It("podman manifest push purge", func() {
+ Skip(v2remotefail)
session := podmanTest.Podman([]string{"manifest", "create", "foo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index 8d575d7f9..e293876b9 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -105,6 +105,32 @@ var _ = Describe("Podman network", func() {
Expect(session.LineInOutputContains("podman-integrationtest")).To(BeTrue())
})
+ It("podman network list --filter success", func() {
+ // Setup, use uuid to prevent conflict with other tests
+ uuid := stringid.GenerateNonCryptoID()
+ secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ writeConf([]byte(secondConf), secondPath)
+ defer removeConf(secondPath)
+
+ session := podmanTest.Podman([]string{"network", "ls", "--filter", "plugin=bridge"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("podman-integrationtest")).To(BeTrue())
+ })
+
+ It("podman network list --filter failure", func() {
+ // Setup, use uuid to prevent conflict with other tests
+ uuid := stringid.GenerateNonCryptoID()
+ secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ writeConf([]byte(secondConf), secondPath)
+ defer removeConf(secondPath)
+
+ session := podmanTest.Podman([]string{"network", "ls", "--filter", "plugin=test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("podman-integrationtest")).To(BeFalse())
+ })
+
It("podman network rm no args", func() {
session := podmanTest.Podman([]string{"network", "rm"})
session.WaitWithDefaultTimeout()
@@ -152,6 +178,19 @@ var _ = Describe("Podman network", func() {
Expect(session.IsJSONOutputValid()).To(BeTrue())
})
+ It("podman network inspect", func() {
+ // Setup, use uuid to prevent conflict with other tests
+ uuid := stringid.GenerateNonCryptoID()
+ secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ writeConf([]byte(secondConf), secondPath)
+ defer removeConf(secondPath)
+
+ session := podmanTest.Podman([]string{"network", "inspect", "podman-integrationtest", "--format", "{{.cniVersion}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("0.3.0")).To(BeTrue())
+ })
+
It("podman inspect container single CNI network", func() {
netName := "testNetSingleCNI"
network := podmanTest.Podman([]string{"network", "create", "--subnet", "10.50.50.0/24", netName})
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index e0a10c202..e56db54a2 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -2,7 +2,9 @@ package integration
import (
"fmt"
+ "io/ioutil"
"os"
+ "path/filepath"
"strings"
. "github.com/containers/libpod/test/utils"
@@ -28,7 +30,7 @@ var _ = Describe("Podman pod create", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -282,4 +284,26 @@ var _ = Describe("Podman pod create", func() {
podCreate.WaitWithDefaultTimeout()
Expect(podCreate.ExitCode()).To(Equal(125))
})
+
+ It("podman create pod and print id to external file", func() {
+ // Switch to temp dir and restore it afterwards
+ cwd, err := os.Getwd()
+ Expect(err).To(BeNil())
+ Expect(os.Chdir(os.TempDir())).To(BeNil())
+ targetPath := filepath.Join(os.TempDir(), "dir")
+ Expect(os.MkdirAll(targetPath, 0755)).To(BeNil())
+ targetFile := filepath.Join(targetPath, "idFile")
+ defer Expect(os.RemoveAll(targetFile)).To(BeNil())
+ defer Expect(os.Chdir(cwd)).To(BeNil())
+
+ session := podmanTest.Podman([]string{"pod", "create", "--name=abc", "--pod-id-file", targetFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ id, _ := ioutil.ReadFile(targetFile)
+ check := podmanTest.Podman([]string{"pod", "inspect", "abc"})
+ check.WaitWithDefaultTimeout()
+ data := check.InspectPodToJSON()
+ Expect(data.ID).To(Equal(string(id)))
+ })
})
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 3cc6fa9e8..9b6f9b657 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -29,7 +29,7 @@ var _ = Describe("Podman pod create", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index f87bbe047..8040adf1e 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -26,7 +26,7 @@ var _ = Describe("Podman pod inspect", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_kill_test.go b/test/e2e/pod_kill_test.go
index a3efec46c..af3d2af73 100644
--- a/test/e2e/pod_kill_test.go
+++ b/test/e2e/pod_kill_test.go
@@ -27,7 +27,7 @@ var _ = Describe("Podman pod kill", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -100,6 +100,7 @@ var _ = Describe("Podman pod kill", func() {
})
It("podman pod kill latest pod", func() {
+ SkipIfRemote()
_, ec, podid := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_pause_test.go b/test/e2e/pod_pause_test.go
index 7067c9a87..df6e5482e 100644
--- a/test/e2e/pod_pause_test.go
+++ b/test/e2e/pod_pause_test.go
@@ -29,7 +29,7 @@ var _ = Describe("Podman pod pause", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_pod_namespaces.go b/test/e2e/pod_pod_namespaces.go
index 09f716806..806ec3884 100644
--- a/test/e2e/pod_pod_namespaces.go
+++ b/test/e2e/pod_pod_namespaces.go
@@ -29,7 +29,7 @@ var _ = Describe("Podman pod create", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_prune_test.go b/test/e2e/pod_prune_test.go
index d98383331..1711b55d4 100644
--- a/test/e2e/pod_prune_test.go
+++ b/test/e2e/pod_prune_test.go
@@ -26,7 +26,7 @@ var _ = Describe("Podman pod prune", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_ps_test.go b/test/e2e/pod_ps_test.go
index 5f8712a7a..81d97b72d 100644
--- a/test/e2e/pod_ps_test.go
+++ b/test/e2e/pod_ps_test.go
@@ -28,7 +28,7 @@ var _ = Describe("Podman ps", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -83,6 +83,7 @@ var _ = Describe("Podman ps", func() {
})
It("podman pod ps latest", func() {
+ SkipIfRemote()
_, ec, podid1 := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_restart_test.go b/test/e2e/pod_restart_test.go
index 691fe5f0c..72e804353 100644
--- a/test/e2e/pod_restart_test.go
+++ b/test/e2e/pod_restart_test.go
@@ -26,7 +26,7 @@ var _ = Describe("Podman pod restart", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -134,6 +134,7 @@ var _ = Describe("Podman pod restart", func() {
})
It("podman pod restart latest pod", func() {
+ SkipIfRemote()
_, ec, _ := podmanTest.CreatePod("foobar99")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index 90f178be6..e10b3c98f 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -29,7 +29,7 @@ var _ = Describe("Podman pod rm", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -60,6 +60,7 @@ var _ = Describe("Podman pod rm", func() {
})
It("podman pod rm latest pod", func() {
+ SkipIfRemote()
_, ec, podid := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 2722cb5b3..8e78cadfd 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -26,7 +26,7 @@ var _ = Describe("Podman pod start", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -103,6 +103,7 @@ var _ = Describe("Podman pod start", func() {
})
It("podman pod start latest pod", func() {
+ SkipIfRemote()
_, ec, _ := podmanTest.CreatePod("foobar99")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go
index 347f33e62..9bba59073 100644
--- a/test/e2e/pod_stats_test.go
+++ b/test/e2e/pod_stats_test.go
@@ -35,7 +35,7 @@ var _ = Describe("Podman pod stats", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index a61917adb..298f3da2f 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -26,7 +26,7 @@ var _ = Describe("Podman pod stop", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
@@ -144,6 +144,7 @@ var _ = Describe("Podman pod stop", func() {
})
It("podman pod stop latest pod", func() {
+ SkipIfRemote()
_, ec, _ := podmanTest.CreatePod("foobar99")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/pod_top_test.go b/test/e2e/pod_top_test.go
index c313b0675..de011eda7 100644
--- a/test/e2e/pod_top_test.go
+++ b/test/e2e/pod_top_test.go
@@ -30,7 +30,7 @@ var _ = Describe("Podman top", func() {
})
AfterEach(func() {
- podmanTest.CleanupPod()
+ podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index a09b6f37a..e77e6dd25 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -168,12 +168,13 @@ var _ = Describe("Podman prune", func() {
session = podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ podid1 := session.OutputToString()
- session = podmanTest.Podman([]string{"pod", "start", "-l"})
+ session = podmanTest.Podman([]string{"pod", "start", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"pod", "stop", "-l"})
+ session = podmanTest.Podman([]string{"pod", "stop", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -196,13 +197,14 @@ var _ = Describe("Podman prune", func() {
session := podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ podid1 := session.OutputToString()
// Start and stop a pod to get it in exited state.
- session = podmanTest.Podman([]string{"pod", "start", "-l"})
+ session = podmanTest.Podman([]string{"pod", "start", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"pod", "stop", "-l"})
+ session = podmanTest.Podman([]string{"pod", "stop", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -224,17 +226,17 @@ var _ = Describe("Podman prune", func() {
})
It("podman system prune with running, exited pod and volume prune set true", func() {
-
// Start and stop a pod to get it in exited state.
session := podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ podid1 := session.OutputToString()
- session = podmanTest.Podman([]string{"pod", "start", "-l"})
+ session = podmanTest.Podman([]string{"pod", "start", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"pod", "stop", "-l"})
+ session = podmanTest.Podman([]string{"pod", "stop", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -242,7 +244,9 @@ var _ = Describe("Podman prune", func() {
session = podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"pod", "start", "-l"})
+ podid2 := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"pod", "start", podid2})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -302,13 +306,14 @@ var _ = Describe("Podman prune", func() {
session := podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ podid1 := session.OutputToString()
// Start and stop a pod to get it in exited state.
- session = podmanTest.Podman([]string{"pod", "start", "-l"})
+ session = podmanTest.Podman([]string{"pod", "start", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"pod", "stop", "-l"})
+ session = podmanTest.Podman([]string{"pod", "stop", podid1})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -317,9 +322,6 @@ var _ = Describe("Podman prune", func() {
create.WaitWithDefaultTimeout()
Expect(create.ExitCode()).To(Equal(0))
- // Adding images should be pruned
- podmanTest.BuildImage(pruneImage, "alpine_bash:latest", "true")
-
// Adding unused volume should not be pruned as volumes not set
session = podmanTest.Podman([]string{"volume", "create"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index b987c3ff4..8965ce297 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -101,6 +101,7 @@ var _ = Describe("Podman ps", func() {
})
It("podman ps latest flag", func() {
+ SkipIfRemote()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go
index 2b515f53b..e72a20f2d 100644
--- a/test/e2e/restart_test.go
+++ b/test/e2e/restart_test.go
@@ -122,6 +122,7 @@ var _ = Describe("Podman restart", func() {
})
It("Podman restart the latest container", func() {
+ SkipIfRemote()
_, exitCode, _ := podmanTest.RunLsContainer("test1")
Expect(exitCode).To(Equal(0))
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index 4eb568879..87e3de922 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -123,6 +123,7 @@ var _ = Describe("Podman rm", func() {
})
It("podman rm the latest container", func() {
+ SkipIfRemote()
session := podmanTest.Podman([]string{"create", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go
index 6c0b01bd5..150726ce4 100644
--- a/test/e2e/rmi_test.go
+++ b/test/e2e/rmi_test.go
@@ -75,6 +75,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi tagged image", func() {
+ Skip(v2remotefail)
setup := podmanTest.PodmanNoCache([]string{"images", "-q", ALPINE})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))
@@ -91,6 +92,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi image with tags by ID cannot be done without force", func() {
+ Skip(v2remotefail)
setup := podmanTest.PodmanNoCache([]string{"images", "-q", ALPINE})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 6a93da085..9db2f5d49 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -19,7 +19,6 @@ var _ = Describe("Podman run networking", func() {
)
BeforeEach(func() {
- SkipIfRootlessV2()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -193,6 +192,8 @@ var _ = Describe("Podman run networking", func() {
})
It("podman run network expose duplicate host port results in error", func() {
+ SkipIfRootless()
+
session := podmanTest.Podman([]string{"run", "-dt", "-p", "80", ALPINE, "/bin/sh"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -202,7 +203,7 @@ var _ = Describe("Podman run networking", func() {
Expect(inspect.ExitCode()).To(Equal(0))
containerConfig := inspect.InspectContainerToJSON()
- Expect(containerConfig[0].NetworkSettings.Ports[0].HostPort).ToNot(Equal("80"))
+ Expect(containerConfig[0].NetworkSettings.Ports[0].HostPort).ToNot(Equal(80))
})
It("podman run hostname test", func() {
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 1f892d9f8..58091ff68 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -117,7 +117,7 @@ var _ = Describe("Podman run with volumes", func() {
session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=" + dest, ALPINE, "grep", dest, "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(session.OutputToString()).To(ContainSubstring(dest + " rw,nosuid,nodev,noexec,relatime - tmpfs"))
+ Expect(session.OutputToString()).To(ContainSubstring(dest + " rw,nosuid,nodev,relatime - tmpfs"))
session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup", ALPINE, "ls", "/etc/ssl"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go
index 6af0b7068..b8198a3a9 100644
--- a/test/e2e/start_test.go
+++ b/test/e2e/start_test.go
@@ -120,11 +120,11 @@ var _ = Describe("Podman start", func() {
})
It("podman failed to start with --rm should delete the container", func() {
- session := podmanTest.Podman([]string{"create", "-it", "--rm", ALPINE, "foo"})
+ session := podmanTest.Podman([]string{"create", "--name", "test1", "-it", "--rm", ALPINE, "foo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- start := podmanTest.Podman([]string{"start", "-l"})
+ start := podmanTest.Podman([]string{"start", "test1"})
start.WaitWithDefaultTimeout()
Expect(start).To(ExitWithError())
@@ -136,7 +136,7 @@ var _ = Describe("Podman start", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- start := podmanTest.Podman([]string{"start", "-l"})
+ start := podmanTest.Podman([]string{"start", session.OutputToString()})
start.WaitWithDefaultTimeout()
Expect(start).To(ExitWithError())
diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go
index 54c64d66b..cd78a54e1 100644
--- a/test/e2e/stop_test.go
+++ b/test/e2e/stop_test.go
@@ -184,6 +184,7 @@ var _ = Describe("Podman stop", func() {
})
It("podman stop latest containers", func() {
+ SkipIfRemote()
session := podmanTest.RunTopContainer("test1")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go
index e5ce69739..f45ff0c5f 100644
--- a/test/e2e/system_reset_test.go
+++ b/test/e2e/system_reset_test.go
@@ -34,6 +34,7 @@ var _ = Describe("podman system reset", func() {
})
It("podman system reset", func() {
+ Skip(v2remotefail)
// system reset will not remove additional store images, so need to grab length
session := podmanTest.Podman([]string{"rmi", "--force", "--all"})
@@ -63,7 +64,7 @@ var _ = Describe("podman system reset", func() {
// If remote then the varlink service should have exited
// On local tests this is a noop
- podmanTest.StartVarlink()
+ podmanTest.StartRemoteService()
session = podmanTest.Podman([]string{"images", "-n"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/untag_test.go b/test/e2e/untag_test.go
index 17171cd41..dc1a6208e 100644
--- a/test/e2e/untag_test.go
+++ b/test/e2e/untag_test.go
@@ -58,7 +58,7 @@ var _ = Describe("Podman untag", func() {
results := podmanTest.PodmanNoCache([]string{"images"})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(results.OutputToStringArray()).To(HaveLen(5))
+ Expect(results.OutputToStringArray()).To(HaveLen(6))
Expect(results.LineInOuputStartsWith("docker.io/library/alpine")).To(BeTrue())
Expect(results.LineInOuputStartsWith("localhost/foo")).To(BeTrue())
Expect(results.LineInOuputStartsWith("localhost/bar")).To(BeTrue())
diff --git a/test/endpoint/endpoint.go b/test/endpoint/endpoint.go
index f1677ec5f..284f0d79c 100644
--- a/test/endpoint/endpoint.go
+++ b/test/endpoint/endpoint.go
@@ -71,7 +71,7 @@ func (p *EndpointTestIntegration) startVarlink(useImageCache bool) {
os.MkdirAll("/run/podman", 0755)
}
varlinkEndpoint := p.VarlinkEndpoint
- //p.SetVarlinkAddress(p.VarlinkEndpoint)
+ //p.SetVarlinkAddress(p.RemoteSocket)
args := []string{"varlink", "--timeout", "0", varlinkEndpoint}
podmanOptions := getVarlinkOptions(p, args)
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index 5fc07acfb..71595f419 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -23,7 +23,7 @@ function setup() {
is "${lines[0]}" "Version:[ ]\+[1-9][0-9.]\+" "Version line 1"
is "$output" ".*Go Version: \+" "'Go Version' in output"
- is "$output" ".*RemoteAPI Version: \+" "API version in output"
+ is "$output" ".*API Version: \+" "API version in output"
}
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index ae2e39d6b..1bcf3896f 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -8,8 +8,8 @@ load helpers
# 2019-09 Fedora 31 and rawhide (32) are switching from runc to crun
# because of cgroups v2; crun emits different error messages.
# Default to runc:
- err_no_such_cmd="Error: .*: starting container process caused .*exec:.*stat /no/such/command: no such file or directory"
- err_no_exec_dir="Error: .*: starting container process caused .*exec:.* permission denied"
+ err_no_such_cmd="Error: .*: starting container process caused.*exec:.*stat /no/such/command: no such file or directory"
+ err_no_exec_dir="Error: .*: starting container process caused.*exec:.* permission denied"
# ...but check the configured runtime engine, and switch to crun as needed
run_podman info --format '{{ .Host.OCIRuntime.Path }}'
diff --git a/test/system/160-volumes.bats b/test/system/160-volumes.bats
new file mode 100644
index 000000000..3233e6f04
--- /dev/null
+++ b/test/system/160-volumes.bats
@@ -0,0 +1,244 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# podman volume-related tests
+#
+
+load helpers
+
+function setup() {
+ basic_setup
+
+ run_podman '?' volume rm -a
+}
+
+function teardown() {
+ run_podman '?' rm -a --volumes
+ run_podman '?' volume rm -a -f
+
+ basic_teardown
+}
+
+
+# Simple volume tests: share files between host and container
+@test "podman run --volumes : basic" {
+ skip_if_remote "volumes cannot be shared across hosts"
+
+ # Create three temporary directories
+ vol1=${PODMAN_TMPDIR}/v1_$(random_string)
+ vol2=${PODMAN_TMPDIR}/v2_$(random_string)
+ vol3=${PODMAN_TMPDIR}/v3_$(random_string)
+ mkdir $vol1 $vol2 $vol3
+
+ # In each directory, write a random string to a file
+ echo $(random_string) >$vol1/file1_in
+ echo $(random_string) >$vol2/file2_in
+ echo $(random_string) >$vol3/file3_in
+
+ # Run 'cat' on each file, and compare against local files. Mix -v / --volume
+ # flags, and specify them out of order just for grins. The shell wildcard
+ # expansion must sort vol1/2/3 lexically regardless.
+ v_opts="-v $vol1:/vol1:z --volume $vol3:/vol3:z -v $vol2:/vol2:z"
+ run_podman run --rm $v_opts $IMAGE sh -c "cat /vol?/file?_in"
+
+ for i in 1 2 3; do
+ eval voldir=\$vol${i}
+ is "${lines[$(($i - 1))]}" "$(< $voldir/file${i}_in)" \
+ "contents of /vol${i}/file${i}_in"
+ done
+
+ # Confirm that container sees vol1 as a mount point
+ run_podman run --rm $v_opts $IMAGE mount
+ is "$output" ".* on /vol1 type .*" "'mount' in container lists vol1"
+
+ # Have the container do write operations, confirm them on host
+ out1=$(random_string)
+ run_podman run --rm $v_opts $IMAGE sh -c "echo $out1 >/vol1/file1_out;
+ cp /vol2/file2_in /vol3/file3_out"
+ is "$(<$vol1/file1_out)" "$out1" "contents of /vol1/file1_out"
+ is "$(<$vol3/file3_out)" "$(<$vol2/file2_in)" "contents of /vol3/file3_out"
+
+ # Writing to read-only volumes: not allowed
+ run_podman 1 run --rm -v $vol1:/vol1ro:z,ro $IMAGE sh -c "touch /vol1ro/abc"
+ is "$output" ".*Read-only file system" "touch on read-only volume"
+}
+
+
+# Named volumes
+@test "podman volume create / run" {
+ myvolume=myvol$(random_string)
+ mylabel=$(random_string)
+
+ # Create a named volume
+ run_podman volume create --label l=$mylabel $myvolume
+ is "$output" "$myvolume" "output from volume create"
+
+ # Confirm that it shows up in 'volume ls', and confirm values
+ run_podman volume ls --format json
+ tests="
+Name | $myvolume
+Driver | local
+Labels.l | $mylabel
+"
+ parse_table "$tests" | while read field expect; do
+ actual=$(jq -r ".[0].$field" <<<"$output")
+ is "$actual" "$expect" "volume ls .$field"
+ done
+
+ # Run a container that writes to a file in that volume
+ mountpoint=$(jq -r '.[0].Mountpoint' <<<"$output")
+ rand=$(random_string)
+ run_podman run --rm --volume $myvolume:/vol $IMAGE sh -c "echo $rand >/vol/myfile"
+
+ # Confirm that the file is visible, with content, outside the container
+ is "$(<$mountpoint/myfile)" "$rand" "we see content created in container"
+
+ # Clean up
+ run_podman volume rm $myvolume
+}
+
+
+# Running scripts (executables) from a volume
+@test "podman volume: exec/noexec" {
+ myvolume=myvol$(random_string)
+
+ run_podman volume create $myvolume
+ is "$output" "$myvolume" "output from volume create"
+
+ run_podman volume inspect --format '{{.Mountpoint}}' $myvolume
+ mountpoint="$output"
+
+ # Create a script, make it runnable
+ rand=$(random_string)
+ cat >$mountpoint/myscript <<EOF
+#!/bin/sh
+echo "got here -$rand-"
+EOF
+ chmod 755 $mountpoint/myscript
+
+ # By default, volumes are mounted exec, but we have manually added the
+ # noexec option. This should fail.
+ # ARGH. Unfortunately, runc (used for cgroups v1) produces a different error
+ local expect_rc=126
+ local expect_msg='.* OCI runtime permission denied.*'
+ run_podman info --format '{{ .Host.OCIRuntime.Path }}'
+ if expr "$output" : ".*/runc"; then
+ expect_rc=1
+ expect_msg='.* exec user process caused.*permission denied'
+ fi
+
+ run_podman ${expect_rc} run --rm --volume $myvolume:/vol:noexec,z $IMAGE /vol/myscript
+ is "$output" "$expect_msg" "run on volume, noexec"
+
+ # With the default, it should pass
+ run_podman run --rm -v $myvolume:/vol:z $IMAGE /vol/myscript
+ is "$output" "got here -$rand-" "script in volume is runnable with default (exec)"
+
+ # Clean up
+ run_podman volume rm $myvolume
+}
+
+
+# Anonymous temporary volumes, and persistent autocreated named ones
+@test "podman volume, implicit creation with run" {
+
+ # No hostdir arg: create anonymous container with random name
+ rand=$(random_string)
+ run_podman run -v /myvol $IMAGE sh -c "echo $rand >/myvol/myfile"
+
+ run_podman volume ls -q
+ tempvolume="$output"
+
+ # We should see the file created in the container
+ run_podman volume inspect --format '{{.Mountpoint}}' $tempvolume
+ mountpoint="$output"
+ test -e "$mountpoint/myfile"
+ is "$(< $mountpoint/myfile)" "$rand" "file contents, anonymous volume"
+
+ # Remove the container, using rm --volumes. Volume should now be gone.
+ run_podman rm -a --volumes
+ run_podman volume ls -q
+ is "$output" "" "anonymous volume is removed after container is rm'ed"
+
+ # Create a *named* container. This one should persist after container ends
+ myvol=myvol$(random_string)
+ rand=$(random_string)
+
+ run_podman run --rm -v $myvol:/myvol:z $IMAGE \
+ sh -c "echo $rand >/myvol/myfile"
+ run_podman volume ls -q
+ is "$output" "$myvol" "autocreated named container persists"
+
+ # ...and should be usable, read/write, by a second container
+ run_podman run --rm -v $myvol:/myvol:z $IMAGE \
+ sh -c "cp /myvol/myfile /myvol/myfile2"
+
+ run_podman volume rm $myvol
+
+ # Autocreated volumes should also work with keep-id
+ # All we do here is check status; podman 1.9.1 would fail with EPERM
+ myvol=myvol$(random_string)
+ run_podman run --rm -v $myvol:/myvol:z --userns=keep-id $IMAGE \
+ touch /myvol/myfile
+
+ run_podman volume rm $myvol
+}
+
+
+# Confirm that container sees the correct id
+@test "podman volume with --userns=keep-id" {
+ is_rootless || skip "only meaningful when run rootless"
+
+ myvoldir=${PODMAN_TMPDIR}/volume_$(random_string)
+ mkdir $myvoldir
+ touch $myvoldir/myfile
+
+ # With keep-id
+ run_podman run --rm -v $myvoldir:/vol:z --userns=keep-id $IMAGE \
+ stat -c "%u:%s" /vol/myfile
+ is "$output" "$(id -u):0" "with keep-id: stat(file in container) == my uid"
+
+ # Without
+ run_podman run --rm -v $myvoldir:/vol:z $IMAGE \
+ stat -c "%u:%s" /vol/myfile
+ is "$output" "0:0" "w/o keep-id: stat(file in container) == root"
+}
+
+
+# 'volume prune' identifies and cleans up unused volumes
+@test "podman volume prune" {
+ # Create four named volumes
+ local -a v=()
+ for i in 1 2 3 4;do
+ vol=myvol${i}$(random_string)
+ v[$i]=$vol
+ run_podman volume create $vol
+ done
+
+ # Run two containers: one mounting v1, one mounting v2 & v3
+ run_podman run --name c1 --volume ${v[1]}:/vol1 $IMAGE date
+ run_podman run --name c2 --volume ${v[2]}:/vol2 -v ${v[3]}:/vol3 \
+ $IMAGE date
+
+ # prune should remove v4
+ run_podman volume prune --force
+ is "$output" "${v[4]}" "volume prune, with 1, 2, 3 in use, deletes only 4"
+
+ # Remove the container using v2 and v3. Prune should now remove those.
+ # The 'echo sort' is to get the output sorted and in one line.
+ run_podman rm c2
+ run_podman volume prune --force
+ is "$(echo $(sort <<<$output))" "${v[2]} ${v[3]}" \
+ "volume prune, after rm c2, deletes volumes 2 and 3"
+
+ # Remove the final container. Prune should now remove v1.
+ run_podman rm c1
+ run_podman volume prune --force
+ is "$output" "${v[1]}" "volume prune, after rm c2 & c1, deletes volume 1"
+
+ # Further prunes are NOPs
+ run_podman volume prune --force
+ is "$output" "" "no more volumes to prune"
+}
+
+
+# vim: filetype=sh
diff --git a/test/system/200-pod-top.bats b/test/system/200-pod-top.bats
deleted file mode 100644
index bba1e8d14..000000000
--- a/test/system/200-pod-top.bats
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bats
-
-load helpers
-
-@test "podman pod top - containers in different PID namespaces" {
- skip_if_remote "podman-pod does not work with podman-remote"
-
- # With infra=false, we don't get a /pause container (we also
- # don't pull k8s.gcr.io/pause )
- no_infra='--infra=false'
- run_podman pod create $no_infra
- podid="$output"
-
- # Start two containers...
- run_podman run -d --pod $podid $IMAGE top -d 2
- cid1="$output"
- run_podman run -d --pod $podid $IMAGE top -d 2
- cid2="$output"
-
- # ...and wait for them to actually start.
- wait_for_output "PID \+PPID \+USER " $cid1
- wait_for_output "PID \+PPID \+USER " $cid2
-
- # Both containers have emitted at least one top-like line.
- # Now run 'pod top', and expect two 'top -d 2' processes running.
- run_podman pod top $podid
- is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers"
-
- # By default (podman pod create w/ default --infra) there should be
- # a /pause container.
- if [ -z "$no_infra" ]; then
- is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container"
- fi
-
- # Clean up
- run_podman pod rm -f $podid
-}
-
-
-# vim: filetype=sh
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
new file mode 100644
index 000000000..f3d278826
--- /dev/null
+++ b/test/system/200-pod.bats
@@ -0,0 +1,174 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# This is a long ugly way to clean up pods and remove the pause image
+function teardown() {
+ run_podman pod rm -f -a
+ run_podman rm -f -a
+ run_podman image list --format '{{.ID}} {{.Repository}}'
+ while read id name; do
+ if [[ "$name" =~ /pause ]]; then
+ run_podman rmi $id
+ fi
+ done <<<"$output"
+
+ basic_teardown
+}
+
+
+@test "podman pod top - containers in different PID namespaces" {
+ skip_if_remote "podman-pod does not work with podman-remote"
+
+ # With infra=false, we don't get a /pause container (we also
+ # don't pull k8s.gcr.io/pause )
+ no_infra='--infra=false'
+ run_podman pod create $no_infra
+ podid="$output"
+
+ # Start two containers...
+ run_podman run -d --pod $podid $IMAGE top -d 2
+ cid1="$output"
+ run_podman run -d --pod $podid $IMAGE top -d 2
+ cid2="$output"
+
+ # ...and wait for them to actually start.
+ wait_for_output "PID \+PPID \+USER " $cid1
+ wait_for_output "PID \+PPID \+USER " $cid2
+
+ # Both containers have emitted at least one top-like line.
+ # Now run 'pod top', and expect two 'top -d 2' processes running.
+ run_podman pod top $podid
+ is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers"
+
+ # By default (podman pod create w/ default --infra) there should be
+ # a /pause container.
+ if [ -z "$no_infra" ]; then
+ is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container"
+ fi
+
+ # Clean up
+ run_podman pod rm -f $podid
+}
+
+
+@test "podman pod - communicating between pods" {
+ skip_if_remote "podman-pod does not work with podman-remote"
+
+ podname=pod$(random_string)
+ run_podman pod create --infra=true --name=$podname
+
+ # Randomly-assigned port in the 5xxx range
+ for port in $(shuf -i 5000-5999);do
+ if ! { exec 3<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ break
+ fi
+ done
+
+ # Listener. This will exit as soon as it receives a message.
+ run_podman run -d --pod $podname $IMAGE nc -l -p $port
+ cid1="$output"
+
+ # Talker: send the message via common port on localhost
+ message=$(random_string 15)
+ run_podman run --rm --pod $podname $IMAGE \
+ sh -c "echo $message | nc 127.0.0.1 $port"
+
+ # Back to the first (listener) container. Make sure message was received.
+ run_podman logs $cid1
+ is "$output" "$message" "message sent from one container to another"
+
+ # Clean up. First the nc -l container...
+ run_podman rm $cid1
+
+ # ...then, from pause container, find the image ID of the pause image...
+ # FIXME: if #6283 gets implemented, use 'inspect --format ...'
+ run_podman pod inspect $podname
+ pause_cid=$(jq -r '.Containers[0].Id' <<<"$output")
+ run_podman container inspect --format '{{.Image}}' $pause_cid
+ pause_iid="$output"
+
+ # ...then rm the pod, then rmi the pause image so we don't leave strays.
+ run_podman pod rm $podname
+ run_podman rmi $pause_iid
+}
+
+# Random byte
+function octet() {
+ echo $(( $RANDOM & 255 ))
+}
+
+# random MAC address: convention seems to be that 2nd lsb=1, lsb=0
+# (i.e. 0bxxxxxx10) in the first octet guarantees a private space.
+# FIXME: I can't find a definitive reference for this though
+# Generate the address IN CAPS (A-F), but we will test it in lowercase.
+function random_mac() {
+ local mac=$(printf "%02X" $(( $(octet) & 242 | 2 )) )
+ for i in $(seq 2 6); do
+ mac+=$(printf ":%02X" $(octet))
+ done
+
+ echo $mac
+}
+
+# Random RFC1918 IP address
+function random_ip() {
+ local ip="172.20"
+ for i in 1 2;do
+ ip+=$(printf ".%d" $(octet))
+ done
+ echo $ip
+}
+
+@test "podman pod create - hashtag AllTheOptions" {
+ mac=$(random_mac)
+ add_host_ip=$(random_ip)
+ add_host_n=$(random_string | tr A-Z a-z).$(random_string | tr A-Z a-z).xyz
+
+ dns_server=$(random_ip)
+ dns_opt="ndots:$(octet)"
+ dns_search=$(random_string 15 | tr A-Z a-z).abc
+
+ hostname=$(random_string | tr A-Z a-z).$(random_string | tr A-Z a-z).net
+
+ pod_id_file=${PODMAN_TMPDIR}/pod-id-file
+
+ # Create a pod with all the desired options
+ # FIXME: --ip=$ip fails:
+ # Error adding network: failed to allocate all requested IPs
+ run_podman pod create --name=mypod \
+ --pod-id-file=$pod_id_file \
+ --mac-address=$mac \
+ --hostname=$hostname \
+ --add-host "$add_host_n:$add_host_ip" \
+ --dns "$dns_server" \
+ --dns-search "$dns_search" \
+ --dns-opt "$dns_opt"
+ pod_id="$output"
+
+ # Check --pod-id-file
+ # FIXME: broken in master; reenable once #6292 is fixed
+ #is "$(<$pod_id_file)" "$pod_id" "contents of pod-id-file"
+
+ # Check each of the options
+ if ! is_rootless; then
+ run_podman run --rm --pod mypod $IMAGE ip link show
+ # 'ip' outputs hex in lower-case, ${expr,,} converts UC to lc
+ is "$output" ".* link/ether ${mac,,} " "requested MAC address was set"
+ fi
+
+ run_podman run --rm --pod mypod $IMAGE hostname
+ is "$output" "$hostname" "--hostname set the hostname"
+
+ run_podman run --rm --pod $pod_id $IMAGE cat /etc/hosts
+ is "$output" ".*$add_host_ip $add_host_n" "--add-host was added"
+ is "$output" ".* $hostname" "--hostname is in /etc/hosts"
+ # ^^^^ this must be a tab, not a space
+
+ run_podman run --rm --pod mypod $IMAGE cat /etc/resolv.conf
+ is "$output" ".*nameserver $dns_server" "--dns [server] was added"
+ is "$output" ".*search $dns_search" "--dns-search was added"
+ is "$output" ".*options $dns_opt" "--dns-opt was added"
+}
+
+# vim: filetype=sh
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index cdac43c1c..4bee13414 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -33,6 +33,13 @@ function teardown() {
# This test can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman generate - systemd - basic" {
+ # podman initializes this if unset, but systemctl doesn't
+ if is_rootless; then
+ if [ -z "$XDG_RUNTIME_DIR" ]; then
+ export XDG_RUNTIME_DIR=/run/user/$(id -u)
+ fi
+ fi
+
cname=$(random_string)
run_podman create --name $cname --detach $IMAGE top
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 0131e023d..1d59e5468 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -39,9 +39,9 @@ type PodmanTest struct {
TempDir string
RemoteTest bool
RemotePodmanBinary string
- VarlinkSession *os.Process
- VarlinkEndpoint string
- VarlinkCommand *exec.Cmd
+ RemoteSession *os.Process
+ RemoteSocket string
+ RemoteCommand *exec.Cmd
ImageCacheDir string
ImageCacheFS string
}
@@ -71,9 +71,10 @@ func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, cwd string
podmanBinary := p.PodmanBinary
if p.RemoteTest {
podmanBinary = p.RemotePodmanBinary
- env = append(env, fmt.Sprintf("PODMAN_VARLINK_ADDRESS=%s", p.VarlinkEndpoint))
}
-
+ if p.RemoteTest {
+ podmanOptions = append([]string{"--remote", p.RemoteSocket}, podmanOptions...)
+ }
if env == nil {
fmt.Printf("Running: %s %s\n", podmanBinary, strings.Join(podmanOptions, " "))
} else {
diff --git a/troubleshooting.md b/troubleshooting.md
index f04d9e9fa..167ee14c3 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -219,8 +219,15 @@ the system.
#### Solution
-SELinux provides a boolean `container_manage_cgroup`, which allows container
-processes to write to the cgroup file system. Turn on this boolean, on SELinux separated systems, to allow systemd to run properly in the container.
+Newer versions of Podman (2.0 or greater) support running init-based containers
+with a different SELinux label, which allows the container process access to the
+cgroup file system. This feature requires container-selinux-2.132 or newer.
+
+Prior to Podman 2.0, the SELinux boolean `container_manage_cgroup` allows
+container processes to write to the cgroup file system. Turn on this boolean,
+on SELinux separated systems, to allow systemd to run properly in the container.
+Only do this on systems running older versions of Podman.
`setsebool -P container_manage_cgroup true`
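
For reference, the current state of the boolean can be checked with `getsebool` before and after changing it; it defaults to `off` on most distributions:

```console
$ getsebool container_manage_cgroup
container_manage_cgroup --> off
```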
@@ -240,7 +247,7 @@ cannot find newuidmap: exec: "newuidmap": executable file not found in $PATH
#### Solution
-Install a version of shadow-utils that includes these executables. Note RHEL7 and Centos 7 will not have support for this until RHEL7.7 is released.
+Install a version of shadow-utils that includes these executables. Note that RHEL 7 and CentOS 7 will not have support for this until RHEL 7.7 is released.
### 11) rootless setup user: invalid argument
@@ -424,9 +431,10 @@ Choose one of the following:
* Install the fuse-overlayfs package for your Linux Distribution.
* Add `mount_program = "/usr/bin/fuse-overlayfs"` under `[storage.options]` in your `~/.config/containers/storage.conf` file.
-### 17) rhel7-init based images don't work with cgroups v2
+### 17) RHEL 7 and CentOS 7 based `init` images don't work with cgroup v2
-The systemd version shipped in rhel7-init doesn't have support for cgroups v2. You'll need at least systemd 230.
+The systemd version shipped in RHEL 7 and CentOS 7 doesn't support cgroup v2. Support for cgroup v2 requires systemd 230 or newer, which
+was never shipped or supported on RHEL 7 or CentOS 7.
#### Symptom
```console
@@ -440,7 +448,15 @@ Error: non zero exit code: 1: OCI runtime error
#### Solution
You'll need to either:
-* configure the host to use cgroups v1
+* configure the host to use cgroup v1
+
+```
+On Fedora you can do:
+# dnf install -y grubby
+# grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+# reboot
+```
+
* update the image to use an updated version of systemd.
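
To confirm which cgroup version the host is actually running, a quick check of the filesystem type mounted at `/sys/fs/cgroup` works: `cgroup2fs` indicates cgroup v2, while `tmpfs` indicates the legacy v1 hierarchy:

```console
$ stat -fc %T /sys/fs/cgroup/
cgroup2fs
```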
### 18) rootless containers exit once the user session exits
@@ -483,7 +499,7 @@ Unable to pull images
```console
$ podman unshare cat /proc/self/uid_map
- 0 1000 1
+ 0 1000 1
```
#### Solution
@@ -496,8 +512,8 @@ Original command now returns
```
$ podman unshare cat /proc/self/uid_map
- 0 1000 1
- 1 100000 65536
+ 0 1000 1
+ 1 100000 65536
```
Reference [subuid](http://man7.org/linux/man-pages/man5/subuid.5.html) and [subgid](http://man7.org/linux/man-pages/man5/subgid.5.html) man pages for more detail.
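
The corrected mappings above come from `/etc/subuid` and `/etc/subgid`. As a minimal sketch, assuming a user named `user1` (a hypothetical name), entries granting the range shown in the fixed `uid_map` output would look like:

```console
$ grep user1 /etc/subuid /etc/subgid
/etc/subuid:user1:100000:65536
/etc/subgid:user1:100000:65536
```

After editing these files, running `podman system migrate` is typically needed so an already-initialized rootless setup picks up the new ranges.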
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
new file mode 100644
index 000000000..229ac2556
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
@@ -0,0 +1,151 @@
+// +build windows
+
+package vhd
+
+import "syscall"
+
+//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
+
+//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
+//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
+//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk
+
+type virtualStorageType struct {
+ DeviceID uint32
+ VendorID [16]byte
+}
+
+type (
+ createVirtualDiskFlag uint32
+ VirtualDiskAccessMask uint32
+ VirtualDiskFlag uint32
+)
+
+const (
+ // Flags for creating a VHD (not exported)
+ createVirtualDiskFlagNone createVirtualDiskFlag = 0
+ createVirtualDiskFlagFullPhysicalAllocation createVirtualDiskFlag = 1
+ createVirtualDiskFlagPreventWritesToSourceDisk createVirtualDiskFlag = 2
+ createVirtualDiskFlagDoNotCopyMetadataFromParent createVirtualDiskFlag = 4
+
+ // Access Mask for opening a VHD
+ VirtualDiskAccessNone VirtualDiskAccessMask = 0
+ VirtualDiskAccessAttachRO VirtualDiskAccessMask = 65536
+ VirtualDiskAccessAttachRW VirtualDiskAccessMask = 131072
+ VirtualDiskAccessDetach VirtualDiskAccessMask = 262144
+ VirtualDiskAccessGetInfo VirtualDiskAccessMask = 524288
+ VirtualDiskAccessCreate VirtualDiskAccessMask = 1048576
+ VirtualDiskAccessMetaOps VirtualDiskAccessMask = 2097152
+ VirtualDiskAccessRead VirtualDiskAccessMask = 851968
+ VirtualDiskAccessAll VirtualDiskAccessMask = 4128768
+ VirtualDiskAccessWritable VirtualDiskAccessMask = 3276800
+
+ // Flags for opening a VHD
+ OpenVirtualDiskFlagNone VirtualDiskFlag = 0
+ OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x1
+ OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x2
+ OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x4
+ OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x8
+ OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x10
+ OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x20
+ OpenVirtualDiskFlagVhdSetFileOnly VirtualDiskFlag = 0x40
+ OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x80
+ OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x100
+)
+
+type createVersion2 struct {
+ UniqueID [16]byte // GUID
+ MaximumSize uint64
+ BlockSizeInBytes uint32
+ SectorSizeInBytes uint32
+ ParentPath *uint16 // string
+ SourcePath *uint16 // string
+ OpenFlags uint32
+ ParentVirtualStorageType virtualStorageType
+ SourceVirtualStorageType virtualStorageType
+ ResiliencyGUID [16]byte // GUID
+}
+
+type createVirtualDiskParameters struct {
+ Version uint32 // Must always be set to 2
+ Version2 createVersion2
+}
+
+type openVersion2 struct {
+ GetInfoOnly int32 // bool but 4-byte aligned
+ ReadOnly int32 // bool but 4-byte aligned
+ ResiliencyGUID [16]byte // GUID
+}
+
+type openVirtualDiskParameters struct {
+ Version uint32 // Must always be set to 2
+ Version2 openVersion2
+}
+
+// CreateVhdx will create a simple vhdx file at the given path using default values.
+func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
+ var (
+ defaultType virtualStorageType
+ handle syscall.Handle
+ )
+
+ parameters := createVirtualDiskParameters{
+ Version: 2,
+ Version2: createVersion2{
+ MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
+ BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
+ },
+ }
+
+ if err := createVirtualDisk(
+ &defaultType,
+ path,
+ uint32(VirtualDiskAccessNone),
+ nil,
+ uint32(createVirtualDiskFlagNone),
+ 0,
+ &parameters,
+ nil,
+ &handle); err != nil {
+ return err
+ }
+
+ if err := syscall.CloseHandle(handle); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DetachVhd detaches a mounted container layer vhd found at `path`.
+func DetachVhd(path string) error {
+ handle, err := OpenVirtualDisk(
+ path,
+ VirtualDiskAccessNone,
+ OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator)
+
+ if err != nil {
+ return err
+ }
+ defer syscall.CloseHandle(handle)
+ return detachVirtualDisk(handle, 0, 0)
+}
+
+// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
+func OpenVirtualDisk(path string, accessMask VirtualDiskAccessMask, flag VirtualDiskFlag) (syscall.Handle, error) {
+ var (
+ defaultType virtualStorageType
+ handle syscall.Handle
+ )
+ parameters := openVirtualDiskParameters{Version: 2}
+ if err := openVirtualDisk(
+ &defaultType,
+ path,
+ uint32(accessMask),
+ uint32(flag),
+ &parameters,
+ &handle); err != nil {
+ return 0, err
+ }
+ return handle, nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go
new file mode 100644
index 000000000..00599ea49
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go
@@ -0,0 +1,99 @@
+// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+
+package vhd
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values see on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
+
+ procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
+ procOpenVirtualDisk = modVirtDisk.NewProc("OpenVirtualDisk")
+ procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
+)
+
+func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
+}
+
+func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
+ r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
+ if r1 != 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
+}
+
+func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
+ r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+ if r1 != 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
+ if r1 != 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS
new file mode 100644
index 000000000..1a59c8021
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS
@@ -0,0 +1,3 @@
+* @microsoft/containerplat
+
+/hcn/* @nagiesek
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
index 15b39181a..d504f1889 100644
--- a/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -2,7 +2,7 @@
[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
-This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
@@ -16,6 +16,11 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
+We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+
+## Code of Conduct
+
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
index 661bc406f..6617fade0 100644
--- a/vendor/github.com/Microsoft/hcsshim/appveyor.yml
+++ b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
@@ -6,7 +6,7 @@ clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim
environment:
GOPATH: c:\gopath
- PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH%
+ PATH: "%GOPATH%\\bin;C:\\gometalinter-2.0.12-windows-amd64;%PATH%"
stack: go 1.13.4
@@ -22,10 +22,12 @@ build_script:
- go build ./internal/tools/uvmboot
- go build ./internal/tools/zapdir
- go test -v ./... -tags admin
- - go test -c ./test/containerd-shim-runhcs-v1/ -tags functional
- - go test -c ./test/cri-containerd/ -tags functional
- - go test -c ./test/functional/ -tags functional
- - go test -c ./test/runhcs/ -tags functional
+ - cd test
+ - go test -v ./internal -tags admin
+ - go test -c ./containerd-shim-runhcs-v1/ -tags functional
+ - go test -c ./cri-containerd/ -tags functional
+ - go test -c ./functional/ -tags functional
+ - go test -c ./runhcs/ -tags functional
artifacts:
- path: 'containerd-shim-runhcs-v1.exe'
@@ -35,7 +37,7 @@ artifacts:
- path: 'grantvmgroupaccess.exe'
- path: 'uvmboot.exe'
- path: 'zapdir.exe'
- - path: 'containerd-shim-runhcs-v1.test.exe'
- - path: 'cri-containerd.test.exe'
- - path: 'functional.test.exe'
- - path: 'runhcs.test.exe' \ No newline at end of file
+ - path: './test/containerd-shim-runhcs-v1.test.exe'
+ - path: './test/cri-containerd.test.exe'
+ - path: './test/functional.test.exe'
+ - path: './test/runhcs.test.exe'
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 72d253dad..5255b93f1 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -4,34 +4,32 @@ go 1.13
require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
- github.com/blang/semver v3.1.0+incompatible // indirect
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
- github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69
+ github.com/containerd/containerd v1.3.2
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
- github.com/gogo/protobuf v1.2.1
- github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
- github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 // indirect
+ github.com/gogo/protobuf v1.3.1
+ github.com/golang/protobuf v1.3.2 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
- github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39
github.com/pkg/errors v0.8.1
- github.com/prometheus/procfs v0.0.5 // indirect
- github.com/sirupsen/logrus v1.4.1
- github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 // indirect
+ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
+ github.com/sirupsen/logrus v1.4.2
+ github.com/stretchr/testify v1.4.0 // indirect
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
- github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
- github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
- github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
go.opencensus.io v0.22.0
- golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
+ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect
+ golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
- google.golang.org/grpc v1.20.1
+ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
+ google.golang.org/grpc v1.23.1
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+ gopkg.in/yaml.v2 v2.2.8 // indirect
gotest.tools v2.2.0+incompatible // indirect
- k8s.io/kubernetes v1.13.0
)
diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum
index 578b78e81..8ab4318ed 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.sum
+++ b/vendor/github.com/Microsoft/hcsshim/go.sum
@@ -1,16 +1,15 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 h1:rG1clvJbgsUcmb50J82YUJhUMopWNtZvyMZjb+4fqGw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
@@ -23,6 +22,7 @@ github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8N
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -31,6 +31,8 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68Fp
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -38,47 +40,47 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 h1:cAv7ZbSmyb1wjn6T4TIiyFCkpcfgpbcNNC3bM2srLaI=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f h1:a969LJ4IQFwRHYqonHtUDMSh9i54WcKggeEkQ3fZMl4=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -93,15 +95,19 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJV
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
@@ -112,20 +118,32 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
index a3e03ff8f..00ab26364 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
@@ -21,8 +21,11 @@ const (
OutboundNat = hns.OutboundNat
ExternalLoadBalancer = hns.ExternalLoadBalancer
Route = hns.Route
+ Proxy = hns.Proxy
)
+type ProxyPolicy = hns.ProxyPolicy
+
type NatPolicy = hns.NatPolicy
type QosPolicy = hns.QosPolicy
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go
deleted file mode 100644
index 3669c34aa..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package hcs
-
-import "C"
-
-// This import is needed to make the library compile as CGO because HCSSHIM
-// only works with CGO due to callbacks from HCS comming back from a C thread
-// which is not supported without CGO. See https://github.com/golang/go/issues/10973
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/syscall.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/syscall.go
new file mode 100644
index 000000000..ded2175c5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/syscall.go
@@ -0,0 +1,5 @@
+package hcs
+
+//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go syscall.go
+
+//sys hcsFormatWritableLayerVhd(handle uintptr) (hr error) = computestorage.HcsFormatWritableLayerVhd
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 6300a7974..67a5f7176 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -4,12 +4,9 @@ import (
"context"
"encoding/json"
"errors"
- "os"
- "strconv"
"strings"
"sync"
"syscall"
- "time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/log"
@@ -21,27 +18,6 @@ import (
"go.opencensus.io/trace"
)
-// currentContainerStarts is used to limit the number of concurrent container
-// starts.
-var currentContainerStarts containerStarts
-
-type containerStarts struct {
- maxParallel int
- inProgress int
- sync.Mutex
-}
-
-func init() {
- mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START")
- if len(mpsS) > 0 {
- mpsI, err := strconv.Atoi(mpsS)
- if err != nil || mpsI < 0 {
- return
- }
- currentContainerStarts.maxParallel = mpsI
- }
-}
-
type System struct {
handleLock sync.RWMutex
handle vmcompute.HcsSystem
@@ -215,32 +191,6 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
- // This is a very simple backoff-retry loop to limit the number
- // of parallel container starts if environment variable
- // HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
- // It should generally only be used as a workaround to various
- // platform issues that exist between RS1 and RS4 as of Aug 2018
- if currentContainerStarts.maxParallel > 0 {
- for {
- currentContainerStarts.Lock()
- if currentContainerStarts.inProgress < currentContainerStarts.maxParallel {
- currentContainerStarts.inProgress++
- currentContainerStarts.Unlock()
- break
- }
- if currentContainerStarts.inProgress == currentContainerStarts.maxParallel {
- currentContainerStarts.Unlock()
- time.Sleep(100 * time.Millisecond)
- }
- }
- // Make sure we decrement the count when we are done.
- defer func() {
- currentContainerStarts.Lock()
- currentContainerStarts.inProgress--
- currentContainerStarts.Unlock()
- }()
- }
-
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
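Aside on the deletion above: the removed block capped concurrent HcsStartComputeSystem calls with a mutex-guarded counter and a 100 ms sleep loop, controlled by the HCSSHIM_MAX_PARALLEL_START environment variable. Purely as an illustration of the pattern being dropped (not part of this change; all names below are invented), the same cap is usually written in Go as a buffered-channel semaphore:

```go
package example

import "context"

// Invented names for illustration; the deleted code read this limit from the
// HCSSHIM_MAX_PARALLEL_START environment variable at init time.
const maxParallel = 4

var startSem = make(chan struct{}, maxParallel)

// limitedStart allows at most maxParallel concurrent invocations of start,
// which is the behavior the deleted mutex/counter/sleep loop provided.
func limitedStart(ctx context.Context, start func(context.Context) error) error {
	startSem <- struct{}{}        // acquire a slot; blocks once the cap is reached
	defer func() { <-startSem }() // release the slot when the start returns
	return start(ctx)
}
```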
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
index a638677ed..b474604bd 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
@@ -1,10 +1,14 @@
package hcs
import (
+ "context"
"io"
"syscall"
"github.com/Microsoft/go-winio"
+ diskutil "github.com/Microsoft/go-winio/vhd"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/windows"
)
// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
@@ -31,3 +35,27 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
}
return fs, nil
}
+
+// creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`.
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) {
+ if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil {
+ return errors.Wrap(err, "failed to create VHD")
+ }
+
+ vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone)
+ if err != nil {
+ return errors.Wrap(err, "failed to open VHD")
+ }
+ defer func() {
+ err2 := windows.CloseHandle(windows.Handle(vhd))
+ if err == nil {
+ err = errors.Wrap(err2, "failed to close VHD")
+ }
+ }()
+
+ if err := hcsFormatWritableLayerVhd(uintptr(vhd)); err != nil {
+ return errors.Wrap(err, "failed to format VHD")
+ }
+
+ return nil
+}
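The new CreateNTFSVHD helper above chains VHDX creation, open, NTFS formatting (via the hcsFormatWritableLayerVhd syscall added in this change), and handle cleanup into a single call. A minimal usage sketch, with a made-up path and size; note that internal/hcs is only importable from inside the hcsshim module itself:

```go
package example

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// makeScratch creates a 20 GB VHDX at an illustrative path and formats it
// with NTFS through computestorage.dll's HcsFormatWritableLayerVhd.
func makeScratch(ctx context.Context) {
	if err := hcs.CreateNTFSVHD(ctx, `C:\layers\scratch.vhdx`, 20); err != nil {
		log.Fatalf("creating scratch VHD: %v", err)
	}
}
```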
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go
new file mode 100644
index 000000000..39396d272
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go
@@ -0,0 +1,54 @@
+// Code generated mksyscall_windows.exe DO NOT EDIT
+
+package hcs
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values see on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")
+
+ procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
+)
+
+func hcsFormatWritableLayerVhd(handle uintptr) (hr error) {
+ r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
index 6a1c41e15..e0e1a4710 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
@@ -173,6 +173,27 @@ func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
return err
}
+// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint
+func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error {
+ operation := "ApplyProxyPolicy"
+ title := "hcsshim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ for _, policy := range policies {
+ if policy == nil {
+ continue
+ }
+ jsonString, err := json.Marshal(policy)
+ if err != nil {
+ return err
+ }
+ endpoint.Policies = append(endpoint.Policies, jsonString)
+ }
+
+ _, err := endpoint.Update()
+ return err
+}
+
// ContainerAttach attaches an endpoint to container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
operation := "ContainerAttach"
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
index 61da242ee..6765aaead 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
@@ -17,6 +17,7 @@ const (
OutboundNat PolicyType = "OutBoundNAT"
ExternalLoadBalancer PolicyType = "ELB"
Route PolicyType = "ROUTE"
+ Proxy PolicyType = "PROXY"
)
type NatPolicy struct {
@@ -60,6 +61,15 @@ type OutboundNatPolicy struct {
Destinations []string `json:",omitempty"`
}
+type ProxyPolicy struct {
+ Type PolicyType `json:"Type"`
+ IP string `json:",omitempty"`
+ Port string `json:",omitempty"`
+ ExceptionList []string `json:",omitempty"`
+ Destination string `json:",omitempty"`
+ OutboundNat bool `json:",omitempty"`
+}
+
type ActionType string
type DirectionType string
type RuleType string
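With the Proxy policy type, the ProxyPolicy struct above, and the ApplyProxyPolicy method added earlier in this diff, a caller can attach an HNS proxy policy to an existing endpoint. A sketch of how that might look through the re-exported top-level hcsshim package; the endpoint name, addresses, and port are invented:

```go
package example

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func addProxy() {
	ep, err := hcsshim.GetHNSEndpointByName("my-endpoint")
	if err != nil {
		log.Fatalf("looking up endpoint: %v", err)
	}

	policy := hcsshim.ProxyPolicy{
		Type:          hcsshim.Proxy, // the new "PROXY" policy type
		IP:            "10.0.0.4",
		Port:          "8000",
		ExceptionList: []string{"10.0.0.0/8"},
	}

	// ApplyProxyPolicy marshals each policy to JSON, appends it to the
	// endpoint's Policies slice, and pushes the update to HNS.
	if err := ep.ApplyProxyPolicy(&policy); err != nil {
		log.Fatalf("applying proxy policy: %v", err)
	}
}
```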
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
index fb23617f5..24bb3b46b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
@@ -214,9 +214,10 @@ type MappedVirtualDiskController struct {
// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM
type GuestDefinedCapabilities struct {
- NamespaceAddRequestSupported bool `json:",omitempty"`
- SignalProcessSupported bool `json:",omitempty"`
- DumpStacksSupported bool `json:",omitempty"`
+ NamespaceAddRequestSupported bool `json:",omitempty"`
+ SignalProcessSupported bool `json:",omitempty"`
+ DumpStacksSupported bool `json:",omitempty"`
+ DeleteContainerStateSupported bool `json:",omitempty"`
}
// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go
index 781a88401..e985d96d2 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go
@@ -39,4 +39,8 @@ type Devices struct {
FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"`
SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"`
+
+ // TODO: This is pre-release support in schema 2.3. Need to add build number
+ // docs when a public build with this is out.
+ VirtualPci map[string]VirtualPciDevice `json:",omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go
index b4a36954d..95328ec30 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go
@@ -27,4 +27,23 @@ type Memory2 struct {
// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
// the guest operating system).
EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
+
+ // LowMmioGapInMB is the low MMIO region allocated below 4GB.
+ //
+ // TODO: This is pre-release support in schema 2.3. Need to add build number
+ // docs when a public build with this is out.
+ LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
+
+ // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and
+ // size).
+ //
+ // TODO: This is pre-release support in schema 2.3. Need to add build number
+ // docs when a public build with this is out.
+ HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
+
+ // HighMmioGapInMB is the high MMIO region.
+ //
+ // TODO: This is pre-release support in schema 2.3. Need to add build number
+ // docs when a public build with this is out.
+ HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
}
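The three MMIO fields added above let a caller pin the low and high MMIO layout of a utility VM. An illustrative composite literal with arbitrary values (per the TODOs these are pre-release schema 2.3 settings, and internal/schema2 is only importable from within hcsshim):

```go
package example

import hcsschema "github.com/Microsoft/hcsshim/internal/schema2"

// Arbitrary illustrative values: reserve a 768 MB low MMIO gap below 4 GB and
// a 1 GB high MMIO gap starting at the 4 GB boundary.
var memorySettings = &hcsschema.Memory2{
	LowMMIOGapInMB:   768,
	HighMMIOBaseInMB: 4096,
	HighMMIOGapInMB:  1024,
}
```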
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go
new file mode 100644
index 000000000..f5e05903c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.3
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+// TODO: This is pre-release support in schema 2.3. Need to add build number
+// docs when a public build with this is out.
+type VirtualPciDevice struct {
+ Functions []VirtualPciFunction `json:",omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go
new file mode 100644
index 000000000..cedb7d18b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go
@@ -0,0 +1,18 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.3
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+// TODO: This is pre-release support in schema 2.3. Need to add build number
+// docs when a public build with this is out.
+type VirtualPciFunction struct {
+ DeviceInstancePath string `json:",omitempty"`
+
+ VirtualFunction uint16 `json:",omitempty"`
+}
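Together with the Devices.VirtualPci map added earlier in this diff, the two new types describe an assigned PCI device by instance path and virtual function number. A sketch with an invented key and instance path:

```go
package example

import hcsschema "github.com/Microsoft/hcsshim/internal/schema2"

// Illustrative only: the map key and device instance path are invented, and
// these types are pre-release schema 2.3 support.
var devices = hcsschema.Devices{
	VirtualPci: map[string]hcsschema.VirtualPciDevice{
		"assigned-gpu": {
			Functions: []hcsschema.VirtualPciFunction{
				{
					DeviceInstancePath: `PCIP\VEN_10DE&DEV_1EB8&SUBSYS_0000`,
					VirtualFunction:    0,
				},
			},
		},
	},
}
```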
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
index dcb919268..81e454956 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
@@ -1,28 +1,23 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
-func ActivateLayer(path string) (err error) {
+func ActivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::ActivateLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
err = activateLayer(&stdDriverInfo, path)
if err != nil {
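The remaining wclayer files below repeat the conversion shown here for ActivateLayer: the logrus fields/defer logging boilerplate is replaced by an OpenCensus span plus hcsshim's oc.SetSpanStatus helper, and each function gains a context.Context parameter. The skeleton of the new pattern as a standalone sketch (doLayerOp is a hypothetical stand-in for the per-function HCS call):

```go
package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

// doLayerOp is a hypothetical stand-in for ActivateLayer, CreateLayer,
// DestroyLayer, and the other converted wclayer entry points.
func doLayerOp(ctx context.Context, path string) (err error) {
	ctx, span := trace.StartSpan(ctx, "hcsshim::DoLayerOp")
	defer span.End()                               // always close the span
	defer func() { oc.SetSpanStatus(span, err) }() // record success/failure on it
	span.AddAttributes(trace.StringAttribute("path", path))

	// The actual HCS call goes here, passing ctx along for downstream tracing.
	_ = ctx
	return nil
}
```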
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
index 5784241df..f907a7044 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
@@ -1,6 +1,7 @@
package wclayer
import (
+ "context"
"errors"
"os"
"path/filepath"
@@ -8,10 +9,15 @@ import (
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/safefile"
+ "go.opencensus.io/trace"
)
type baseLayerWriter struct {
+ ctx context.Context
+ s *trace.Span
+
root *os.File
f *os.File
bw *winio.BackupFileWriter
@@ -136,12 +142,15 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) {
return n, err
}
-func (w *baseLayerWriter) Close() error {
+func (w *baseLayerWriter) Close() (err error) {
+ defer w.s.End()
+ defer func() { oc.SetSpanStatus(w.s, err) }()
defer func() {
w.root.Close()
w.root = nil
}()
- err := w.closeCurrentFile()
+
+ err = w.closeCurrentFile()
if err != nil {
return err
}
@@ -153,7 +162,7 @@ func (w *baseLayerWriter) Close() error {
return err
}
- err = ProcessBaseLayer(w.root.Name())
+ err = ProcessBaseLayer(w.ctx, w.root.Name())
if err != nil {
return err
}
@@ -163,7 +172,7 @@ func (w *baseLayerWriter) Close() error {
if err != nil {
return err
}
- err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM"))
+ err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM"))
if err != nil {
return err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
index be2bc3fd6..41e5e6731 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
@@ -1,27 +1,23 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
-func CreateLayer(path, parent string) (err error) {
+func CreateLayer(ctx context.Context, path, parent string) (err error) {
title := "hcsshim::CreateLayer"
- fields := logrus.Fields{
- "parent": parent,
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parent", parent))
err = createLayer(&stdDriverInfo, path, parent)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
index 7e3351289..e3ff952a7 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
@@ -1,31 +1,29 @@
package wclayer
import (
+ "context"
+ "strings"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// CreateScratchLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
-func CreateScratchLayer(path string, parentLayerPaths []string) (err error) {
+func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::CreateScratchLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
// Generate layer descriptors
- layers, err := layerPathsToDescriptors(parentLayerPaths)
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil {
return err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
index 2dd5d5715..70a711cf5 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
@@ -1,25 +1,20 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
-func DeactivateLayer(path string) (err error) {
+func DeactivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DeactivateLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
err = deactivateLayer(&stdDriverInfo, path)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
index 4da690c20..bf197e3b0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
@@ -1,26 +1,21 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// DestroyLayer will remove the on-disk files representing the layer with the given
// path, including that layer's containing folder, if any.
-func DestroyLayer(path string) (err error) {
+func DestroyLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DestroyLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
err = destroyLayer(&stdDriverInfo, path)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
index b3b431e35..93f27da8a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
@@ -1,32 +1,27 @@
package wclayer
import (
+ "context"
"os"
"path/filepath"
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/osversion"
- "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
)
// ExpandScratchSize expands the size of a layer to at least size bytes.
-func ExpandScratchSize(path string, size uint64) (err error) {
+func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) {
title := "hcsshim::ExpandScratchSize"
- fields := logrus.Fields{
- "path": path,
- "size": size,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.Int64Attribute("size", int64(size)))
err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
@@ -36,7 +31,7 @@ func ExpandScratchSize(path string, size uint64) (err error) {
// Manually expand the volume now in order to work around bugs in 19H1 and
// prerelease versions of Vb. Remove once this is fixed in Windows.
if build := osversion.Get().Build; build >= osversion.V19H1 && build < 19020 {
- err = expandSandboxVolume(path)
+ err = expandSandboxVolume(ctx, path)
if err != nil {
return err
}
@@ -84,7 +79,7 @@ func attachVhd(path string) (syscall.Handle, error) {
return handle, nil
}
-func expandSandboxVolume(path string) error {
+func expandSandboxVolume(ctx context.Context, path string) error {
// Mount the sandbox VHD temporarily.
vhdPath := filepath.Join(path, "sandbox.vhdx")
vhd, err := attachVhd(vhdPath)
@@ -94,7 +89,7 @@ func expandSandboxVolume(path string) error {
defer syscall.Close(vhd)
// Open the volume.
- volumePath, err := GetLayerMountPath(path)
+ volumePath, err := GetLayerMountPath(ctx, path)
if err != nil {
return err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
index 0425b3395..09f0de1a4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
@@ -1,12 +1,15 @@
package wclayer
import (
+ "context"
"io/ioutil"
"os"
+ "strings"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// ExportLayer will create a folder at exportFolderPath and fill that folder with
@@ -14,24 +17,18 @@ import (
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
-func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) {
+func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ExportLayer"
- fields := logrus.Fields{
- "path": path,
- "exportFolderPath": exportFolderPath,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("exportFolderPath", exportFolderPath),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
// Generate layer descriptors
- layers, err := layerPathsToDescriptors(parentLayerPaths)
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil {
return err
}
@@ -52,25 +49,46 @@ type LayerReader interface {
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
-func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) {
+func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) {
+ ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader")
+ defer func() {
+ if err != nil {
+ oc.SetSpanStatus(span, err)
+ span.End()
+ }
+ }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
+
exportPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
- err = ExportLayer(path, exportPath, parentLayerPaths)
+ err = ExportLayer(ctx, path, exportPath, parentLayerPaths)
if err != nil {
os.RemoveAll(exportPath)
return nil, err
}
- return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil
+ return &legacyLayerReaderWrapper{
+ ctx: ctx,
+ s: span,
+ legacyLayerReader: newLegacyLayerReader(exportPath),
+ }, nil
}
type legacyLayerReaderWrapper struct {
+ ctx context.Context
+ s *trace.Span
+
*legacyLayerReader
}
-func (r *legacyLayerReaderWrapper) Close() error {
- err := r.legacyLayerReader.Close()
+func (r *legacyLayerReaderWrapper) Close() (err error) {
+ defer r.s.End()
+ defer func() { oc.SetSpanStatus(r.s, err) }()
+
+ err = r.legacyLayerReader.Close()
os.RemoveAll(r.root)
return err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
index d60b6ed53..942e3bbf9 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
@@ -1,36 +1,31 @@
package wclayer
import (
+ "context"
"syscall"
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// GetLayerMountPath will look for a mounted layer with the given path and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
-func GetLayerMountPath(path string) (_ string, err error) {
+func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
title := "hcsshim::GetLayerMountPath"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
var mountPathLength uintptr
mountPathLength = 0
// Call the procedure itself.
- logrus.WithFields(fields).Debug("Calling proc (1)")
+ log.G(ctx).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
return "", hcserror.New(err, title+" - failed", "(first call)")
@@ -44,13 +39,13 @@ func GetLayerMountPath(path string) (_ string, err error) {
mountPathp[0] = 0
// Call the procedure again
- logrus.WithFields(fields).Debug("Calling proc (2)")
+ log.G(ctx).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
return "", hcserror.New(err, title+" - failed", "(second call)")
}
mountPath := syscall.UTF16ToString(mountPathp[0:])
- fields["mountPath"] = mountPath
+ span.AddAttributes(trace.StringAttribute("mountPath", mountPath))
return mountPath, nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
index dbd83ef2b..a50378f49 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
@@ -1,29 +1,29 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
-func GetSharedBaseImages() (imageData string, err error) {
+func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
title := "hcsshim::GetSharedBaseImages"
- logrus.Debug(title)
- defer func() {
- if err != nil {
- logrus.WithError(err).Error(err)
- } else {
- logrus.WithField("imageData", imageData).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
return "", hcserror.New(err, title+" - failed", "")
}
- return interop.ConvertAndFreeCoTaskMemString(buffer), nil
+ imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
+ span.AddAttributes(trace.StringAttribute("imageData", imageData))
+ return imageData, nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
index 05735df6c..aa7c8ae1f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
@@ -1,26 +1,22 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// GrantVmAccess adds access to a file for a given VM
-func GrantVmAccess(vmid string, filepath string) (err error) {
+func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) {
title := "hcsshim::GrantVmAccess"
- fields := logrus.Fields{
- "vm-id": vmid,
- "path": filepath,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("vm-id", vmid),
+ trace.StringAttribute("path", filepath))
err = grantVmAccess(vmid, filepath)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
index 76a804f2a..16800b394 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
@@ -1,38 +1,35 @@
package wclayer
import (
+ "context"
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/safefile"
- "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
)
// ImportLayer will take the contents of the folder at importFolderPath and import
// that into a layer with the id layerId. Note that in order to correctly populate
the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
-func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) {
+func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ImportLayer"
- fields := logrus.Fields{
- "path": path,
- "importFolderPath": importFolderPath,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("importFolderPath", importFolderPath),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
// Generate layer descriptors
- layers, err := layerPathsToDescriptors(parentLayerPaths)
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil {
return err
}
@@ -60,20 +57,26 @@ type LayerWriter interface {
}
type legacyLayerWriterWrapper struct {
+ ctx context.Context
+ s *trace.Span
+
*legacyLayerWriter
path string
parentLayerPaths []string
}
-func (r *legacyLayerWriterWrapper) Close() error {
+func (r *legacyLayerWriterWrapper) Close() (err error) {
+ defer r.s.End()
+ defer func() { oc.SetSpanStatus(r.s, err) }()
defer os.RemoveAll(r.root.Name())
defer r.legacyLayerWriter.CloseRoots()
- err := r.legacyLayerWriter.Close()
+
+ err = r.legacyLayerWriter.Close()
if err != nil {
return err
}
- if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
+ if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
return err
}
for _, name := range r.Tombstones {
@@ -96,7 +99,7 @@ func (r *legacyLayerWriterWrapper) Close() error {
if err != nil {
return err
}
- err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM"))
+ err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM"))
if err != nil {
return err
}
@@ -107,7 +110,18 @@ func (r *legacyLayerWriterWrapper) Close() error {
// NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
-func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) {
+func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) {
+ ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter")
+ defer func() {
+ if err != nil {
+ oc.SetSpanStatus(span, err)
+ span.End()
+ }
+ }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
+
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
f, err := safefile.OpenRoot(path)
@@ -115,6 +129,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error)
return nil, err
}
return &baseLayerWriter{
+ ctx: ctx,
+ s: span,
root: f,
}, nil
}
@@ -128,6 +144,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error)
return nil, err
}
return &legacyLayerWriterWrapper{
+ ctx: ctx,
+ s: span,
legacyLayerWriter: w,
path: importPath,
parentLayerPaths: parentLayerPaths,
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
index 258167a57..6dd6f2d57 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
@@ -1,26 +1,21 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
-func LayerExists(path string) (_ bool, err error) {
+func LayerExists(ctx context.Context, path string) (_ bool, err error) {
title := "hcsshim::LayerExists"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
// Call the procedure itself.
var exists uint32
@@ -28,6 +23,6 @@ func LayerExists(path string) (_ bool, err error) {
if err != nil {
return false, hcserror.New(err, title+" - failed", "")
}
- fields["layer-exists"] = exists != 0
+ span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
return exists != 0, nil
}
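
The hunks above (and the similar ones that follow) all apply the same conversion: per-call logrus fields are replaced by an OpenCensus span whose status is set from the function's named error result. As a reference, a minimal sketch of that pattern written as a hypothetical helper inside the wclayer package (exampleLayerOp and its body are illustrative, not part of the patch):

package wclayer

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

// exampleLayerOp (hypothetical) shows the instrumentation boilerplate used by
// every converted function: start a span, record the final error as the span
// status on exit, and attach the inputs as span attributes.
func exampleLayerOp(ctx context.Context, path string) (err error) {
	title := "hcsshim::ExampleLayerOp"
	ctx, span := trace.StartSpan(ctx, title)
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(trace.StringAttribute("path", path))

	// The real functions call the corresponding HCS procedure here and return
	// its (wrapped) error, which the deferred closure records on the span.
	return nil
}
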
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
index 443596fba..0ce34a30f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
@@ -1,13 +1,22 @@
package wclayer
import (
+ "context"
"path/filepath"
"github.com/Microsoft/go-winio/pkg/guid"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// LayerID returns the layer ID of a layer on disk.
-func LayerID(path string) (guid.GUID, error) {
+func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) {
+ title := "hcsshim::LayerID"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
+
_, file := filepath.Split(path)
- return NameToGuid(file)
+ return NameToGuid(ctx, file)
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
index 06671309d..1ec893c6a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
@@ -4,6 +4,7 @@ package wclayer
// functionality.
import (
+ "context"
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
@@ -68,12 +69,12 @@ type WC_LAYER_DESCRIPTOR struct {
Pathp *uint16
}
-func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
+func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
// Array of descriptors that gets constructed.
var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ {
- g, err := LayerID(parentLayerPaths[i])
+ g, err := LayerID(ctx, parentLayerPaths[i])
if err != nil {
logrus.WithError(err).Debug("Failed to convert name to guid")
return nil, err
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
index a259c1b82..b732857b3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -1,34 +1,29 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
-func NameToGuid(name string) (id guid.GUID, err error) {
+func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
title := "hcsshim::NameToGuid"
- fields := logrus.Fields{
- "name": name,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("name", name))
+ var id guid.GUID
err = nameToGuid(name, &id)
if err != nil {
- err = hcserror.New(err, title+" - failed", "")
- return
+ return guid.GUID{}, hcserror.New(err, title+" - failed", "")
}
- fields["guid"] = id.String()
- return
+ span.AddAttributes(trace.StringAttribute("guid", id.String()))
+ return id, nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
index 2b65b0186..55f7730d0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -1,10 +1,13 @@
package wclayer
import (
+ "context"
+ "strings"
"sync"
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
var prepareLayerLock sync.Mutex
@@ -14,23 +17,17 @@ var prepareLayerLock sync.Mutex
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
-func PrepareLayer(path string, parentLayerPaths []string) (err error) {
+func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::PrepareLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
// Generate layer descriptors
- layers, err := layerPathsToDescriptors(parentLayerPaths)
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil {
return err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
index 884207c3e..aabb31368 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
@@ -1,23 +1,41 @@
package wclayer
-import "os"
+import (
+ "context"
+ "os"
+
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
// ProcessBaseLayer post-processes a base layer that has had its files extracted.
// The files should have been extracted to <path>\Files.
-func ProcessBaseLayer(path string) error {
- err := processBaseImage(path)
+func ProcessBaseLayer(ctx context.Context, path string) (err error) {
+ title := "hcsshim::ProcessBaseLayer"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
+
+ err = processBaseImage(path)
if err != nil {
- return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
+ return &os.PathError{Op: title, Path: path, Err: err}
}
return nil
}
// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
// The files should have been extracted to <path>\Files.
-func ProcessUtilityVMImage(path string) error {
- err := processUtilityImage(path)
+func ProcessUtilityVMImage(ctx context.Context, path string) (err error) {
+ title := "hcsshim::ProcessUtilityVMImage"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
+
+ err = processUtilityImage(path)
if err != nil {
- return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
+ return &os.PathError{Op: title, Path: path, Err: err}
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
index bccd45969..84f81848f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
@@ -1,26 +1,21 @@
package wclayer
import (
+ "context"
+
"github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/sirupsen/logrus"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
)
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
-func UnprepareLayer(path string) (err error) {
+func UnprepareLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::UnprepareLayer"
- fields := logrus.Fields{
- "path": path,
- }
- logrus.WithFields(fields).Debug(title)
- defer func() {
- if err != nil {
- fields[logrus.ErrorKey] = err
- logrus.WithFields(fields).Error(err)
- } else {
- logrus.WithFields(fields).Debug(title + " - succeeded")
- }
- }()
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
err = unprepareLayer(&stdDriverInfo, path)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go
index f60ba5501..891616370 100644
--- a/vendor/github.com/Microsoft/hcsshim/layer.go
+++ b/vendor/github.com/Microsoft/hcsshim/layer.go
@@ -1,6 +1,7 @@
package hcsshim
import (
+ "context"
"crypto/sha1"
"path/filepath"
@@ -13,59 +14,59 @@ func layerPath(info *DriverInfo, id string) string {
}
func ActivateLayer(info DriverInfo, id string) error {
- return wclayer.ActivateLayer(layerPath(&info, id))
+ return wclayer.ActivateLayer(context.Background(), layerPath(&info, id))
}
func CreateLayer(info DriverInfo, id, parent string) error {
- return wclayer.CreateLayer(layerPath(&info, id), parent)
+ return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent)
}
// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility.
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
- return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths)
+ return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
}
func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
- return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths)
+ return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
}
func DeactivateLayer(info DriverInfo, id string) error {
- return wclayer.DeactivateLayer(layerPath(&info, id))
+ return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id))
}
func DestroyLayer(info DriverInfo, id string) error {
- return wclayer.DestroyLayer(layerPath(&info, id))
+ return wclayer.DestroyLayer(context.Background(), layerPath(&info, id))
}
// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
- return wclayer.ExpandScratchSize(layerPath(&info, layerId), size)
+ return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size)
}
func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error {
- return wclayer.ExpandScratchSize(layerPath(&info, layerId), size)
+ return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size)
}
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
- return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths)
+ return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths)
}
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
- return wclayer.GetLayerMountPath(layerPath(&info, id))
+ return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id))
}
func GetSharedBaseImages() (imageData string, err error) {
- return wclayer.GetSharedBaseImages()
+ return wclayer.GetSharedBaseImages(context.Background())
}
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
- return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths)
+ return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths)
}
func LayerExists(info DriverInfo, id string) (bool, error) {
- return wclayer.LayerExists(layerPath(&info, id))
+ return wclayer.LayerExists(context.Background(), layerPath(&info, id))
}
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
- return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths)
+ return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
}
func ProcessBaseLayer(path string) error {
- return wclayer.ProcessBaseLayer(path)
+ return wclayer.ProcessBaseLayer(context.Background(), path)
}
func ProcessUtilityVMImage(path string) error {
- return wclayer.ProcessUtilityVMImage(path)
+ return wclayer.ProcessUtilityVMImage(context.Background(), path)
}
func UnprepareLayer(info DriverInfo, layerId string) error {
- return wclayer.UnprepareLayer(layerPath(&info, layerId))
+ return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId))
}
type DriverInfo struct {
@@ -76,7 +77,7 @@ type DriverInfo struct {
type GUID [16]byte
func NameToGuid(name string) (id GUID, err error) {
- g, err := wclayer.NameToGuid(name)
+ g, err := wclayer.NameToGuid(context.Background(), name)
return g.ToWindowsArray(), err
}
@@ -94,13 +95,13 @@ func (g *GUID) ToString() string {
type LayerReader = wclayer.LayerReader
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
- return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths)
+ return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths)
}
type LayerWriter = wclayer.LayerWriter
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
- return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths)
+ return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths)
}
type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR
diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml
new file mode 100644
index 000000000..102fb9a69
--- /dev/null
+++ b/vendor/github.com/blang/semver/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+matrix:
+ include:
+ - go: 1.4.3
+ - go: 1.5.4
+ - go: 1.6.3
+ - go: 1.7
+ - go: tip
+ allow_failures:
+ - go: tip
+install:
+- go get golang.org/x/tools/cmd/cover
+- go get github.com/mattn/goveralls
+script:
+- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
+ -repotoken $COVERALLS_TOKEN
+- echo "Build examples" ; cd examples && go build
+- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
+env:
+ global:
+ secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
index 4399639e2..08b2e4a3d 100644
--- a/vendor/github.com/blang/semver/README.md
+++ b/vendor/github.com/blang/semver/README.md
@@ -1,4 +1,4 @@
-semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
@@ -41,6 +41,7 @@ Features
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
@@ -59,6 +60,8 @@ A condition is composed of an operator and a version. The supported operators ar
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+Note that spaces between the operator and the version will be gracefully tolerated.
+
A `Range` can link multiple `Ranges` separated by space:
Ranges can be linked by logical AND:
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
new file mode 100644
index 000000000..1cf8ebdd9
--- /dev/null
+++ b/vendor/github.com/blang/semver/package.json
@@ -0,0 +1,17 @@
+{
+ "author": "blang",
+ "bugs": {
+ "URL": "https://github.com/blang/semver/issues",
+ "url": "https://github.com/blang/semver/issues"
+ },
+ "gx": {
+ "dvcsimport": "github.com/blang/semver"
+ },
+ "gxVersion": "0.10.0",
+ "language": "go",
+ "license": "MIT",
+ "name": "semver",
+ "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
+ "version": "3.5.1"
+}
+
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
index 0a8eaa1c9..fca406d47 100644
--- a/vendor/github.com/blang/semver/range.go
+++ b/vendor/github.com/blang/semver/range.go
@@ -2,10 +2,33 @@ package semver
import (
"fmt"
+ "strconv"
"strings"
"unicode"
)
+type wildcardType int
+
+const (
+ noneWildcard wildcardType = iota
+ majorWildcard wildcardType = 1
+ minorWildcard wildcardType = 2
+ patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+ switch i {
+ case 1:
+ return majorWildcard
+ case 2:
+ return minorWildcard
+ case 3:
+ return patchWildcard
+ default:
+ return noneWildcard
+ }
+}
+
type comparator func(Version, Version) bool
var (
@@ -92,8 +115,12 @@ func ParseRange(s string) (Range, error) {
if err != nil {
return nil, err
}
+ expandedParts, err := expandWildcardVersion(orParts)
+ if err != nil {
+ return nil, err
+ }
var orFn Range
- for _, p := range orParts {
+ for _, p := range expandedParts {
var andFn Range
for _, ap := range p {
opStr, vStr, err := splitComparatorVersion(ap)
@@ -164,20 +191,39 @@ func buildVersionRange(opStr, vStr string) (*versionRange, error) {
}
-// splitAndTrim splits a range string by spaces and cleans leading and trailing spaces
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+ for _, el := range list {
+ if el == s {
+ return true
+ }
+ }
+ return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespace
func splitAndTrim(s string) (result []string) {
last := 0
+ var lastChar byte
+ excludeFromSplit := []byte{'>', '<', '='}
for i := 0; i < len(s); i++ {
- if s[i] == ' ' {
+ if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
if last < i-1 {
result = append(result, s[last:i])
}
last = i + 1
+ } else if s[i] != ' ' {
+ lastChar = s[i]
}
}
if last < len(s)-1 {
result = append(result, s[last:])
}
+
+ for i, v := range result {
+ result[i] = strings.Replace(v, " ", "", -1)
+ }
+
// parts := strings.Split(s, " ")
// for _, x := range parts {
// if s := strings.TrimSpace(x); len(s) != 0 {
@@ -188,7 +234,6 @@ func splitAndTrim(s string) (result []string) {
}
// splitComparatorVersion splits the comparator from the version.
-// Spaces between the comparator and the version are not allowed.
// Input must be free of leading or trailing spaces.
func splitComparatorVersion(s string) (string, string, error) {
i := strings.IndexFunc(s, unicode.IsDigit)
@@ -198,6 +243,144 @@ func splitComparatorVersion(s string) (string, string, error) {
return strings.TrimSpace(s[0:i]), s[i:], nil
}
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+ parts := strings.Split(vStr, ".")
+ nparts := len(parts)
+ wildcard := parts[nparts-1]
+
+ possibleWildcardType := wildcardTypefromInt(nparts)
+ if wildcard == "x" {
+ return possibleWildcardType
+ }
+
+ return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+ // handle 1.x.x
+ vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+ vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+ parts := strings.Split(vStr2, ".")
+
+ // handle 1.x
+ if len(parts) == 2 {
+ return vStr2 + ".0"
+ }
+
+ return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return "", err
+ }
+ parts[0] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", err
+ }
+ parts[1] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+// < 1.x will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+ var expandedParts [][]string
+ for _, p := range parts {
+ var newParts []string
+ for _, ap := range p {
+ if strings.Index(ap, "x") != -1 {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+
+ versionWildcardType := getWildcardType(vStr)
+ flatVersion := createVersionFromWildcard(vStr)
+
+ var resultOperator string
+ var shouldIncrementVersion bool
+ switch opStr {
+ case ">":
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ case ">=":
+ resultOperator = ">="
+ case "<":
+ resultOperator = "<"
+ case "<=":
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "", "=", "==":
+ newParts = append(newParts, ">="+flatVersion)
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "!=", "!":
+ newParts = append(newParts, "<"+flatVersion)
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ }
+
+ var resultVersion string
+ if shouldIncrementVersion {
+ switch versionWildcardType {
+ case patchWildcard:
+ resultVersion, _ = incrementMinorVersion(flatVersion)
+ case minorWildcard:
+ resultVersion, _ = incrementMajorVersion(flatVersion)
+ }
+ } else {
+ resultVersion = flatVersion
+ }
+
+ ap = resultOperator + resultVersion
+ }
+ newParts = append(newParts, ap)
+ }
+ expandedParts = append(expandedParts, newParts)
+ }
+
+ return expandedParts, nil
+}
+
func parseComparator(s string) comparator {
switch s {
case "==":
@@ -222,3 +405,12 @@ func parseComparator(s string) comparator {
return nil
}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+ r, err := ParseRange(s)
+ if err != nil {
+ panic(`semver: ParseRange(` + s + `): ` + err.Error())
+ }
+ return r
+}
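
With the wildcard expansion added above, range strings containing `x` components are rewritten into plain comparator ranges before being compiled. A small usage sketch of the resulting behaviour (assuming the expansion rules documented in expandWildcardVersion):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ">=1.x" expands to ">=1.0.0" and "<3.x" expands to "<3.0.0",
	// so this range accepts any version in [1.0.0, 3.0.0).
	r := semver.MustParseRange(">=1.x <3.x")

	fmt.Println(r(semver.MustParse("1.2.3"))) // true
	fmt.Println(r(semver.MustParse("3.0.0"))) // false
}
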
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
index bbf85ce97..8ee0842e6 100644
--- a/vendor/github.com/blang/semver/semver.go
+++ b/vendor/github.com/blang/semver/semver.go
@@ -200,6 +200,29 @@ func Make(s string) (Version, error) {
return Parse(s)
}
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
+// with only major and minor components specified
+func ParseTolerant(s string) (Version, error) {
+ s = strings.TrimSpace(s)
+ s = strings.TrimPrefix(s, "v")
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) < 3 {
+ if strings.ContainsAny(parts[len(parts)-1], "+-") {
+ return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+ }
+ for len(parts) < 3 {
+ parts = append(parts, "0")
+ }
+ s = strings.Join(parts, ".")
+ }
+
+ return Parse(s)
+}
+
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
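
ParseTolerant, added above, normalizes loose version strings before delegating to Parse. A brief usage sketch:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Surrounding spaces, a leading "v" and a missing patch number are tolerated.
	v, err := semver.ParseTolerant(" v1.2 ")
	fmt.Println(v, err) // 1.2.0 <nil>

	// A short version carrying pre-release or build metadata is still rejected.
	_, err = semver.ParseTolerant("1.2-rc1")
	fmt.Println(err != nil) // true
}
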
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
index c0053cabe..f8781cf19 100644
--- a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
@@ -21,10 +21,12 @@ import (
"net"
"os"
- "github.com/containernetworking/plugins/pkg/ns"
- "github.com/containernetworking/plugins/pkg/utils/hwaddr"
"github.com/safchain/ethtool"
"github.com/vishvananda/netlink"
+
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/containernetworking/plugins/pkg/utils/hwaddr"
+ "github.com/containernetworking/plugins/pkg/utils/sysctl"
)
var (
@@ -158,6 +160,9 @@ func SetupVethWithName(contVethName, hostVethName string, mtu int, hostNS ns.Net
if err = netlink.LinkSetUp(hostVeth); err != nil {
return fmt.Errorf("failed to set %q up: %v", hostVethName, err)
}
+
+ // we want to own the routes for this interface
+ _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0")
return nil
})
if err != nil {
@@ -178,7 +183,7 @@ func SetupVeth(contVethName string, mtu int, hostNS ns.NetNS) (net.Interface, ne
func DelLinkByName(ifName string) error {
iface, err := netlink.LinkByName(ifName)
if err != nil {
- if err.Error() == "Link not found" {
+ if _, ok := err.(netlink.LinkNotFoundError); ok {
return ErrLinkNotFound
}
return fmt.Errorf("failed to lookup %q: %v", ifName, err)
@@ -195,7 +200,7 @@ func DelLinkByName(ifName string) error {
func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) {
iface, err := netlink.LinkByName(ifName)
if err != nil {
- if err != nil && err.Error() == "Link not found" {
+ if _, ok := err.(netlink.LinkNotFoundError); ok {
return nil, ErrLinkNotFound
}
return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err)
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
index 31ad5f622..a34f97170 100644
--- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
@@ -178,7 +178,16 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
if err = ns.Set(); err != nil {
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
}
- defer threadNS.Set() // switch back
+ defer func() {
+ err := threadNS.Set() // switch back
+ if err == nil {
+ // Unlock the current thread only when we successfully switched back
+ // to the original namespace; otherwise leave the thread locked which
+ // will force the runtime to scrap the current thread; that is perhaps
+ // not optimal, but it is at least always safe to do.
+ runtime.UnlockOSThread()
+ }
+ }()
return toRun(hostNS)
}
@@ -193,6 +202,10 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
var wg sync.WaitGroup
wg.Add(1)
+ // Start the callback in a new goroutine so that if we later fail
+ // to switch the namespace back to the original one, we can safely
+ // leave the thread locked to die without the risk of the current thread
+ // being left lingering with an incorrect namespace.
var innerError error
go func() {
defer wg.Done()
diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
new file mode 100644
index 000000000..7ee47e1ce
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
@@ -0,0 +1,80 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sysctl
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+)
+
+// Sysctl provides a method to set/get values from /proc/sys - the Linux
+// interface to the variables formerly handled by the sysctl syscall.
+// If the optional `params` argument contains exactly one string value, this
+// function sets that value on the corresponding sysctl variable.
+func Sysctl(name string, params ...string) (string, error) {
+ if len(params) > 1 {
+ return "", fmt.Errorf("unexpected additional parameters")
+ } else if len(params) == 1 {
+ return setSysctl(name, params[0])
+ }
+ return getSysctl(name)
+}
+
+func getSysctl(name string) (string, error) {
+ fullName := filepath.Join("/proc/sys", toNormalName(name))
+ fullName = filepath.Clean(fullName)
+ data, err := ioutil.ReadFile(fullName)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data[:len(data)-1]), nil
+}
+
+func setSysctl(name, value string) (string, error) {
+ fullName := filepath.Join("/proc/sys", toNormalName(name))
+ fullName = filepath.Clean(fullName)
+ if err := ioutil.WriteFile(fullName, []byte(value), 0644); err != nil {
+ return "", err
+ }
+
+ return getSysctl(name)
+}
+
+// Normalize names by using slash as separator
+// Sysctl names can use dots or slashes as separator:
+// - if dots are used, dots and slashes are interchanged.
+// - if slashes are used, slashes and dots are left intact.
+// Separator in use is determined by first occurrence.
+func toNormalName(name string) string {
+ interchange := false
+ for _, c := range name {
+ if c == '.' {
+ interchange = true
+ break
+ }
+ if c == '/' {
+ break
+ }
+ }
+
+ if interchange {
+ r := strings.NewReplacer(".", "/", "/", ".")
+ return r.Replace(name)
+ }
+ return name
+}
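
This new vendored helper is what the SetupVethWithName change above uses to disable accept_ra on the host-side veth. A short usage sketch (the sysctl names are examples; reading assumes the entry exists, and writing normally requires root):

package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/utils/sysctl"
)

func main() {
	// Dotted and slash-separated names are both accepted; when dots are used,
	// dots and slashes are interchanged, so "net.ipv4.ip_forward" and
	// "net/ipv4/ip_forward" name the same /proc/sys entry.
	val, err := sysctl.Sysctl("net.ipv4.ip_forward")
	fmt.Println(val, err)

	// Passing one extra argument writes the value, e.g. (as in the veth setup
	// above) disabling router advertisements on an interface:
	//   _, err = sysctl.Sysctl("net/ipv6/conf/eth0/accept_ra", "0")
}
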
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 611284476..ef75d9847 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -19,12 +19,15 @@ import (
)
const (
+ // _configPath is the path to the containers/containers.conf
+ // inside a given config directory.
+ _configPath = "containers/containers.conf"
// DefaultContainersConfig holds the default containers config path
- DefaultContainersConfig = "/usr/share/containers/containers.conf"
+ DefaultContainersConfig = "/usr/share/" + _configPath
// OverrideContainersConfig holds the default config paths overridden by the root user
- OverrideContainersConfig = "/etc/containers/containers.conf"
+ OverrideContainersConfig = "/etc/" + _configPath
// UserOverrideContainersConfig holds the containers config path overridden by the rootless user
- UserOverrideContainersConfig = ".config/containers/containers.conf"
+ UserOverrideContainersConfig = ".config/" + _configPath
)
// RuntimeStateStore is a constant indicating which state store implementation
@@ -827,7 +830,7 @@ func isDirectory(path string) error {
func rootlessConfigPath() (string, error) {
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
- return filepath.Join(configHome, UserOverrideContainersConfig), nil
+ return filepath.Join(configHome, _configPath), nil
}
home, err := unshare.HomeDir()
if err != nil {
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index e8610254c..9fc0e5123 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -798,7 +798,6 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
- copyGroup.Add(numLayers)
// copySemaphore is used to limit the number of parallel downloads to
// avoid malicious images causing troubles and to be nice to servers.
@@ -850,18 +849,22 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
if err := func() error { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
- defer progressCleanup()
+ defer func() {
+ // Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool.
+ copyGroup.Wait()
+ progressCleanup()
+ }()
for i, srcLayer := range srcInfos {
err = copySemaphore.Acquire(ctx, 1)
if err != nil {
return errors.Wrapf(err, "Can't acquire semaphore")
}
+ copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
}
- // Wait for all layers to be copied
- copyGroup.Wait()
+ // A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return err
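
The copy.go change above reorders teardown so that progressCleanup only runs once every copyLayerHelper goroutine has finished, and moves the WaitGroup Add next to each goroutine launch so an early return never waits for workers that were never started. A generic sketch of that defer ordering (the names are illustrative, not the containers/image API):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var group sync.WaitGroup
	pool := "shared progress pool" // stands in for the resource the workers share

	cleanup := func() { fmt.Println("cleaning up", pool) }

	func() {
		defer func() {
			// Wait for every worker before tearing down the shared resource;
			// cleanup must not run while any worker is still using it.
			group.Wait()
			cleanup()
		}()
		for i := 0; i < 3; i++ {
			group.Add(1) // add immediately before each launch, not up front
			go func(i int) {
				defer group.Done()
				fmt.Println("worker", i, "uses", pool)
			}(i)
		}
	}()
}
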
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index c5c49b90b..9461bc91a 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -613,6 +613,9 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
params.Add("client_id", "containers/image")
authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+ if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+ authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+ }
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
@@ -665,6 +668,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
if c.auth.Username != "" && c.auth.Password != "" {
authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
}
+ if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+ authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+ }
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 483581dbc..479effa59 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -37,7 +37,7 @@ func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference
// SourceRefFullName returns a fully expanded name for the repository this image is in.
func (i *Image) SourceRefFullName() string {
- return i.src.ref.ref.Name()
+ return i.src.logicalRef.ref.Name()
}
// GetRepositoryTags list all tags available in the repository. The tag
@@ -45,7 +45,7 @@ func (i *Image) SourceRefFullName() string {
// backward-compatible shim method which calls the module-level
// GetRepositoryTags)
func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
- return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+ return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef)
}
// GetRepositoryTags list all tags available in the repository. The tag
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index ab74e1607..979100ee3 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@@ -162,20 +163,31 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
- tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
+ uploadLocation, err = func() (*url.URL, error) { // A scope for defer
+ uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)))
+ // This error text should never be user-visible; we terminate only after makeRequestToResolvedURL
+ // returns, so there isn’t a way for the error text to be provided to any of our callers.
+ defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
+ res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ if err != nil {
+ logrus.Debugf("Error uploading layer chunked %v", err)
+ return nil, err
+ }
+ defer res.Body.Close()
+ if !successStatus(res.StatusCode) {
+ return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error determining upload URL")
+ }
+ return uploadLocation, nil
+ }()
if err != nil {
- logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
- defer res.Body.Close()
computedDigest := digester.Digest()
- uploadLocation, err = res.Location()
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
- }
-
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
@@ -469,17 +481,17 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
}
switch {
case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures, instanceDigest)
+ return d.putSignaturesToLookaside(signatures, *instanceDigest)
case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
+ return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error {
+// which is not nil, for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -490,7 +502,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -505,7 +517,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -564,8 +576,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
}
}
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
@@ -575,7 +588,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
- existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest)
+ existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest)
if err != nil {
return err
}
@@ -600,7 +613,7 @@ sigExists:
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
- signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes)
+ signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
@@ -616,7 +629,7 @@ sigExists:
return err
}
- path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 9c0c20c64..10aff615e 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -24,8 +24,9 @@ import (
)
type dockerImageSource struct {
- ref dockerReference
- c *dockerClient
+ logicalRef dockerReference // The reference the user requested.
+ physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
+ c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
cachedManifestMIMEType string // Only valid if cachedManifest != nil
@@ -49,7 +50,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
}
- primaryDomain := reference.Domain(ref.ref)
// Check all endpoints for the manifest availability. If we find one that does
// contain the image, it will be used for all future pull actions. Always try the
// non-mirror original location last; this both transparently handles the case
@@ -66,7 +66,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
attempts := []attempt{}
for _, pullSource := range pullSources {
logrus.Debugf("Trying to access %q", pullSource.Reference)
- s, err := newImageSourceAttempt(ctx, sys, pullSource, primaryDomain)
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
if err == nil {
return s, nil
}
@@ -95,32 +95,33 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
-// Given a pullSource and primaryDomain, return a dockerImageSource if it is reachable.
+// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSource sysregistriesv2.PullSource, primaryDomain string) (*dockerImageSource, error) {
- ref, err := newReference(pullSource.Reference)
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
+ physicalRef, err := newReference(pullSource.Reference)
if err != nil {
return nil, err
}
endpointSys := sys
// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
- if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain {
+ if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
copy := *endpointSys
copy.DockerAuthConfig = nil
copy.DockerBearerRegistryToken = ""
endpointSys = &copy
}
- client, err := newDockerClientFromRef(endpointSys, ref, false, "pull")
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
s := &dockerImageSource{
- ref: ref,
- c: client,
+ logicalRef: logicalRef,
+ physicalRef: physicalRef,
+ c: client,
}
if err := s.ensureManifestIsLoaded(ctx); err != nil {
@@ -132,7 +133,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSo
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *dockerImageSource) Reference() types.ImageReference {
- return s.ref
+ return s.logicalRef
}
// Close removes resources associated with an initialized ImageSource, if any.
@@ -181,7 +182,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
}
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
- path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
+ path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
@@ -191,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
+ return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -213,7 +214,7 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
}
- reference, err := s.ref.tagOrDigest()
+ reference, err := s.physicalRef.tagOrDigest()
if err != nil {
return err
}
@@ -271,7 +272,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return s.getExternalBlob(ctx, info.URLs)
}
- path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+ path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
@@ -280,7 +281,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err := httpResponseToError(res, "Error fetching blob"); err != nil {
return nil, 0, err
}
- cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
+ cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
return res.Body, getBlobSize(res), nil
}
@@ -308,7 +309,7 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
if instanceDigest != nil {
return *instanceDigest, nil
}
- if digested, ok := s.ref.ref.(reference.Digested); ok {
+ if digested, ok := s.physicalRef.ref.(reference.Digested); ok {
d := digested.Digest()
if d.Algorithm() == digest.Canonical {
return d, nil
@@ -398,7 +399,7 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i
return nil, err
}
- parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
+ parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
index c23457642..c4d42f3eb 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -115,12 +115,23 @@ func getCPUVariant(os string, arch string) string {
return ""
}
+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
var compatibility = map[string][]string{
- "arm": {"v7", "v6", "v5"},
+ "arm": {"v8", "v7", "v6", "v5"},
"arm64": {"v8"},
}
-// Returns all compatible platforms with the platform specifics possibly overriden by user,
+// baseVariants contains, for a specified architecture, a variant that is known to be
+// supported by _all_ machines using that architecture.
+// Architectures that don’t have variants, or where there are possible versions without
+// an established variant name, should not have an entry here.
+var baseVariants = map[string]string{
+ "arm64": "v8",
+}
+
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by the user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
@@ -145,59 +156,45 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
wantedOS = ctx.OSChoice
}
- var wantedPlatforms []imgspecv1.Platform
- if wantedVariant != "" && compatibility[wantedArch] != nil {
- wantedPlatforms = make([]imgspecv1.Platform, 0, len(compatibility[wantedArch]))
- wantedIndex := -1
- for i, v := range compatibility[wantedArch] {
- if wantedVariant == v {
- wantedIndex = i
- break
+ var variants []string = nil
+ if wantedVariant != "" {
+ if compatibility[wantedArch] != nil {
+ variantOrder := compatibility[wantedArch]
+ for i, v := range variantOrder {
+ if wantedVariant == v {
+ variants = variantOrder[i:]
+ break
+ }
}
}
- // user wants a variant which we know nothing about - not even compatibility
- if wantedIndex == -1 {
- wantedPlatforms = []imgspecv1.Platform{
- {
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: wantedVariant,
- },
- }
- } else {
- for i := wantedIndex; i < len(compatibility[wantedArch]); i++ {
- v := compatibility[wantedArch][i]
- wantedPlatforms = append(wantedPlatforms, imgspecv1.Platform{
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: v,
- })
- }
+ if variants == nil {
+ // user wants a variant which we know nothing about - not even compatibility
+ variants = []string{wantedVariant}
}
+ variants = append(variants, "")
} else {
- wantedPlatforms = []imgspecv1.Platform{
- {
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: wantedVariant,
- },
+ variants = append(variants, "") // No variant specified, use a “no variant specified” image if present
+ if baseVariant, ok := baseVariants[wantedArch]; ok {
+ // But also accept an image with the “base” variant for the architecture, if it exists.
+ variants = append(variants, baseVariant)
}
}
- return wantedPlatforms, nil
+ res := make([]imgspecv1.Platform, 0, len(variants))
+ for _, v := range variants {
+ res = append(res, imgspecv1.Platform{
+ OS: wantedOS,
+ Architecture: wantedArch,
+ Variant: v,
+ })
+ }
+ return res, nil
}
+// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
+// an item from the return value of WantedPlatforms.
func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
- if image.Architecture != wanted.Architecture {
- return false
- }
- if image.OS != wanted.OS {
- return false
- }
-
- if wanted.Variant == "" || image.Variant == wanted.Variant {
- return true
- }
-
- return false
+ return image.Architecture == wanted.Architecture &&
+ image.OS == wanted.OS &&
+ image.Variant == wanted.Variant
}
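
The rewrite above makes the variant preference list explicit: a requested variant is expanded to every compatible, less-restrictive variant plus the empty variant, while an unspecified variant accepts the empty variant and, where defined, the architecture's base variant. A standalone re-creation of just that ordering, for illustration only (the real logic lives in the internal platform package and also fills in OS and architecture defaults):

package main

import "fmt"

var compatibility = map[string][]string{
	"arm":   {"v8", "v7", "v6", "v5"},
	"arm64": {"v8"},
}

var baseVariants = map[string]string{
	"arm64": "v8",
}

func wantedVariants(arch, variant string) []string {
	var variants []string
	if variant != "" {
		if order := compatibility[arch]; order != nil {
			for i, v := range order {
				if v == variant {
					variants = order[i:]
					break
				}
			}
		}
		if variants == nil {
			variants = []string{variant} // unknown variant: keep it as requested
		}
		return append(variants, "") // finally accept images that carry no variant
	}
	variants = append(variants, "") // no variant requested: prefer a variant-less image
	if base, ok := baseVariants[arch]; ok {
		variants = append(variants, base) // but also accept the architecture's base variant
	}
	return variants
}

func main() {
	fmt.Println(wantedVariants("arm", "v6")) // [v6 v5 ]
	fmt.Println(wantedVariants("arm64", "")) // [ v8]
}
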
diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
new file mode 100644
index 000000000..6aa9ead68
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
@@ -0,0 +1,61 @@
+package uploadreader
+
+import (
+ "io"
+ "sync"
+)
+
+// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http
+// package (http.NewRequest, http.Post and the like).
+//
+// The net/http package uses a separate goroutine to upload data to a HTTP connection,
+// and it is possible for the server to return a response (typically an error) before consuming
+// the full body of the request. In that case http.Client.Do can return with an error while
+// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
+//
+// As a result, any data used/updated by the io.Reader() provided as the request body may be
+// used/updated even after http.Client.Do returns, causing races.
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
+type UploadReader struct {
+ mutex sync.Mutex
+ // The following members can only be used with mutex held
+ reader io.Reader
+ terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+ return &UploadReader{
+ reader: underlying,
+ terminationError: nil,
+ }
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+ ur.mutex.Lock()
+ defer ur.mutex.Unlock()
+
+ if ur.terminationError != nil {
+ return 0, ur.terminationError
+ }
+ return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+ ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+ defer ur.mutex.Unlock()
+
+ ur.terminationError = err
+}
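As a usage illustration (not part of the diff), the intended call pattern looks roughly like the following. uploadreader is an internal package, so this only sketches how the docker client inside containers/image is expected to use it; putBlob, the URL, and the status handling are placeholders.

package uploadexample

import (
	"io"
	"net/http"

	"github.com/containers/image/v5/internal/uploadreader"
	"github.com/pkg/errors"
)

// putBlob wraps the request body in an UploadReader; if the registry replies
// before consuming the whole body, Terminate is called so that, once this
// function returns, the underlying stream is guaranteed not to be read again.
func putBlob(client *http.Client, url string, body io.Reader) error {
	ur := uploadreader.NewUploadReader(body)
	req, err := http.NewRequest(http.MethodPut, url, ur)
	if err != nil {
		return err
	}
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		// The transport goroutine may still be reading from body; stop it
		// before the caller reuses or closes the underlying stream.
		ur.Terminate(errors.New("upload terminated after early error response"))
		return errors.Errorf("unexpected status %d", res.StatusCode)
	}
	return nil
}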
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
new file mode 100644
index 000000000..fa2b39e0e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -0,0 +1,118 @@
+package manifest
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+ if len(list) == 0 {
+ return nil
+ }
+ dup := make([]string, len(list))
+ copy(dup, list)
+ return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+ if len(m) == 0 {
+ return nil
+ }
+ result := make(map[string]string)
+ for k, v := range m {
+ result[k] = v
+ }
+ return result
+}
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+ layers := make([]string, len(infos))
+ for i, info := range infos {
+ layers[i] = info.Digest.String()
+ }
+ return layers
+}
+
+// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
+// versions of “the same kind of content”.
+// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
+type compressionMIMETypeSet map[string]string
+
+const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
+const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+
+// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
+// to mean "no compression"), based on variantTable.
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return "", fmt.Errorf("cannot update unknown MIME type")
+ }
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType { // Found the variant
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.Name()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
+ }
+ if name != mtsUncompressed {
+ return "", fmt.Errorf("%s compression is not supported", name)
+ }
+ return "", errors.New("uncompressed variant is not supported")
+ }
+ if name != mtsUncompressed {
+ return "", fmt.Errorf("unknown compression algorithm %s", name)
+ }
+ // We can't very well say “the idea of no compression is unknown”
+ return "", errors.New("uncompressed variant is not supported")
+ }
+ }
+ }
+ if algorithm != nil {
+ return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+ }
+ return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+}
+
+// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
+ // Note that manifests in containers-storage might be reporting the
+ // wrong media type since the original manifests are stored while layers
+ // are decompressed in storage. Hence, we need to consider the case
+ // that an already {de}compressed layer should be {de}compressed;
+ // compressionVariantMIMEType does that by not caring whether the original is
+ // {de}compressed.
+ switch updated.CompressionOperation {
+ case types.PreserveOriginal:
+ // Keep the original media type.
+ return mimeType, nil
+
+ case types.Decompress:
+ return compressionVariantMIMEType(variantTable, mimeType, nil)
+
+ case types.Compress:
+ if updated.CompressionAlgorithm == nil {
+ logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest)
+ return mimeType, nil
+ }
+ return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+
+ default:
+ return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
+ }
+}
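To make the table mechanism concrete, a small in-package sketch (the helpers above are unexported, so this would live inside the manifest package). The MIME types below are made-up stand-ins for the real schema2/OCI tables defined in the files that follow.

var exampleTable = []compressionMIMETypeSet{
	{
		mtsUncompressed:         "application/vnd.example.layer",
		compression.Gzip.Name(): "application/vnd.example.layer.gzip",
		compression.Zstd.Name(): mtsUnsupportedMIMEType,
	},
}

// Same-family lookups succeed:
//   compressionVariantMIMEType(exampleTable, "application/vnd.example.layer", &compression.Gzip)
//     == "application/vnd.example.layer.gzip", nil
// Entries marked mtsUnsupportedMIMEType are recognized but rejected:
//   compressionVariantMIMEType(exampleTable, "application/vnd.example.layer", &compression.Zstd)
//     returns an error ("zstd compression is not supported")
// updatedMIMEType only dispatches on info.CompressionOperation
// (PreserveOriginal / Decompress / Compress) before delegating to
// compressionVariantMIMEType, falling back to the original MIME type when a
// Compress request does not name an algorithm.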
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index ff0780fe3..8d8bb9e01 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
@@ -213,26 +212,17 @@ func (m *Schema2) LayerInfos() []LayerInfo {
return blobs
}
-// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
-// is a compressed or decompressed schema 2 foreign layer.
-func isSchema2ForeignLayer(mimeType string) bool {
- switch mimeType {
- case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip:
- return true
- default:
- return false
- }
-}
-
-// isSchema2Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed schema 2 layer.
-func isSchema2Layer(mimeType string) bool {
- switch mimeType {
- case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType:
- return true
- default:
- return false
- }
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ },
+ {
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
+ compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ },
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
@@ -243,67 +233,16 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
for i, info := range layerInfos {
+ mimeType := original[i].MediaType
// First make sure we support the media type of the original layer.
- if err := SupportedSchema2MediaType(original[i].MediaType); err != nil {
- return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+ if err := SupportedSchema2MediaType(mimeType); err != nil {
+ return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
}
-
- // Set the correct media types based on the specified compression
- // operation, the desired compression algorithm AND the original media
- // type.
- //
- // Note that manifests in containers-storage might be reporting the
- // wrong media type since the original manifests are stored while layers
- // are decompressed in storage. Hence, we need to consider the case
- // that an already {de}compressed layer should be {de}compressed, which
- // is being addressed in `isSchema2{Foreign}Layer`.
- switch info.CompressionOperation {
- case types.PreserveOriginal:
- // Keep the original media type.
- m.LayersDescriptors[i].MediaType = original[i].MediaType
-
- case types.Decompress:
- // Decompress the original media type and check if it was
- // non-distributable one or not.
- mimeType := original[i].MediaType
- switch {
- case isSchema2ForeignLayer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType
- case isSchema2Layer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
- }
-
- case types.Compress:
- if info.CompressionAlgorithm == nil {
- logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
- m.LayersDescriptors[i].MediaType = original[i].MediaType
- break
- }
- // Compress the original media type and set the new one based on
- // that type (distributable or not) and the specified compression
- // algorithm. Throw an error if the algorithm is not supported.
- switch info.CompressionAlgorithm.Name() {
- case compression.Gzip.Name():
- mimeType := original[i].MediaType
- switch {
- case isSchema2ForeignLayer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip
- case isSchema2Layer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
- }
- case compression.Zstd.Name():
- return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images")
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+ mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
}
+ m.LayersDescriptors[i].MediaType = mimeType
m.LayersDescriptors[i].Digest = info.Digest
m.LayersDescriptors[i].Size = info.Size
m.LayersDescriptors[i].URLs = info.URLs
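Illustrative only: roughly what a caller (for example the copy pipeline) feeds into the slimmed-down UpdateLayerInfos after recompressing layers. In real use the digests and sizes would be replaced with the recompressed blobs' values, which are elided here; recompressAsGzip is a hypothetical helper.

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

func recompressAsGzip(m *manifest.Schema2) error {
	infos := m.LayerInfos()
	updated := make([]types.BlobInfo, len(infos))
	for i, l := range infos {
		updated[i] = l.BlobInfo // digest/size kept unchanged only for brevity
		updated[i].CompressionOperation = types.Compress
		updated[i].CompressionAlgorithm = &compression.Gzip
	}
	return m.UpdateLayerInfos(updated)
}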
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
index 5f96a981a..bfedff69c 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -107,7 +107,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
}
}
}
- return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+ return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
index c7d741dc2..58982597e 100644
--- a/vendor/github.com/containers/image/v5/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -59,30 +59,6 @@ type ListUpdate struct {
MediaType string
}
-// dupStringSlice returns a deep copy of a slice of strings, or nil if the
-// source slice is empty.
-func dupStringSlice(list []string) []string {
- if len(list) == 0 {
- return nil
- }
- dup := make([]string, len(list))
- copy(dup, list)
- return dup
-}
-
-// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
-// passed-in map is nil or has no keys.
-func dupStringStringMap(m map[string]string) map[string]string {
- if len(m) == 0 {
- return nil
- }
- result := make(map[string]string)
- for k, v := range m {
- result[k] = v
- }
- return result
-}
-
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 033b8d951..7b0758873 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -256,13 +256,3 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
}
-
-// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
-// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
-func layerInfosToStrings(infos []LayerInfo) []string {
- layers := make([]string, len(infos))
- for i, info := range infos {
- layers[i] = info.Digest.String()
- }
- return layers
-}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index aafe6693b..40c40dee8 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -12,7 +12,6 @@ import (
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -95,26 +94,17 @@ func (m *OCI1) LayerInfos() []LayerInfo {
return blobs
}
-// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime
-// type is a compressed or decompressed OCI v1 non-distributable layer.
-func isOCI1NonDistributableLayer(mimeType string) bool {
- switch mimeType {
- case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd:
- return true
- default:
- return false
- }
-}
-
-// isOCI1Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed OCI v1 layer.
-func isOCI1Layer(mimeType string) bool {
- switch mimeType {
- case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd:
- return true
- default:
- return false
- }
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
+ compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+ compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ },
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
+ compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+ },
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
@@ -133,79 +123,19 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType = decMimeType
}
-
- // Set the correct media types based on the specified compression
- // operation, the desired compression algorithm AND the original media
- // type.
- //
- // Note that manifests in containers-storage might be reporting the
- // wrong media type since the original manifests are stored while layers
- // are decompressed in storage. Hence, we need to consider the case
- // that an already {de}compressed layer should be {de}compressed, which
- // is being addressed in `isSchema2{Foreign}Layer`.
- switch info.CompressionOperation {
- case types.PreserveOriginal:
- // Keep the original media type.
- m.Layers[i].MediaType = mimeType
-
- case types.Decompress:
- // Decompress the original media type and check if it was
- // non-distributable one or not.
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", mimeType)
- }
-
- case types.Compress:
- if info.CompressionAlgorithm == nil {
- logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
- m.Layers[i].MediaType = mimeType
- break
- }
- // Compress the original media type and set the new one based on
- // that type (distributable or not) and the specified compression
- // algorithm. Throw an error if the algorithm is not supported.
- switch info.CompressionAlgorithm.Name() {
- case compression.Gzip.Name():
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
- }
-
- case compression.Zstd.Name():
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+ mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
}
-
if info.CryptoOperation == types.Encrypt {
- encMediaType, err := getEncryptedMediaType(m.Layers[i].MediaType)
+ encMediaType, err := getEncryptedMediaType(mimeType)
if err != nil {
- return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", m.Layers[i].MediaType)
+ return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
}
- m.Layers[i].MediaType = encMediaType
+ mimeType = encMediaType
}
+ m.Layers[i].MediaType = mimeType
m.Layers[i].Digest = info.Digest
m.Layers[i].Size = info.Size
m.Layers[i].Annotations = info.Annotations
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index 18cc8135c..7bdea8fb2 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -79,6 +79,9 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
+ if d.Platform == nil {
+ continue
+ }
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
@@ -97,7 +100,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
return d.Digest, nil
}
}
- return "", fmt.Errorf("no image found in image index for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+ return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 717c2b1b3..67f57e03e 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 4
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 3
+ VersionPatch = 4
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index a55b5a189..15bf47baf 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -15,15 +15,20 @@ env:
CIRRUS_CLONE_DEPTH: 50
####
- #### Cache-image names to test with
- ####
+ #### Cache-image names to test with (double-quotes around names are critical)
+ ###
+ FEDORA_NAME: "fedora-32"
+ PRIOR_FEDORA_NAME: "fedora-31"
+ UBUNTU_NAME: "ubuntu-19"
+ PRIOR_UBUNTU_NAME: "ubuntu-18"
+
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
- _BUILT_IMAGE_SUFFIX: "libpod-6301182083727360"
- FEDORA_CACHE_IMAGE_NAME: "fedora-32-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"
+ _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script'
+ FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
+ UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_UBUNTU_CACHE_IMAGE_NAME: "${PRIOR_UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md
new file mode 100644
index 000000000..1496a4c00
--- /dev/null
+++ b/vendor/github.com/containers/storage/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the Containers Storage Project
+
+The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 66e2ae6c2..0044d6cb9 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.19.1
+1.20.1
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index d0c7fab0a..f1c941f11 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -272,7 +272,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
if err != nil {
return "", err
}
- if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil {
return "", err
}
@@ -1701,10 +1701,10 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
if err != nil {
return err
}
- if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
return err
}
- if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index ca50e7f06..0afa6c84d 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -183,7 +183,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
d.ctr.Decrement(mp)
return "", err
}
@@ -198,7 +198,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return "", err
}
- if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index a566e4afd..cbfad2cd5 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -145,7 +145,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- return false, errors.Wrap(err, "failed to mount overlay for metacopy check")
+ return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 7e7dba753..2906e3e08 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -33,6 +33,7 @@ import (
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
@@ -152,11 +153,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
return nil, err
}
runhome := filepath.Join(options.RunRoot, filepath.Base(home))
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
return nil, err
}
@@ -555,7 +556,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
// Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
@@ -767,7 +768,7 @@ func (d *Driver) recreateSymlinks() error {
if err != nil {
return err
}
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
return err
}
for _, dir := range dirs {
@@ -809,6 +810,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
readWrite := true
+ for _, o := range options.Options {
+ if o == "ro" {
+ readWrite = false
+ break
+ }
+ }
+
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
@@ -886,7 +894,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// if we are doing a readOnly mount, and there is only one lower
// We should just return the lower directory, no reason to mount.
- if !readWrite {
+ if !readWrite && d.options.mountProgram == "" {
if len(absLowers) == 0 {
return path.Join(dir, "empty"), nil
}
@@ -904,10 +912,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", err
}
diffDir := path.Join(dir, "diff")
- if readWrite {
- if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil && !os.IsExist(err) {
- return "", err
- }
+ if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil {
+ return "", err
}
mergedDir := path.Join(dir, "merged")
@@ -932,7 +938,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if readWrite {
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, path.Join(dir, "work"))
} else {
- opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
}
if len(options.Options) > 0 {
opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
@@ -1018,7 +1024,7 @@ func (d *Driver) Put(id string) error {
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mountpoint).Run()
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && errors.Cause(err) != exec.ErrNotFound {
logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
}
if err == nil {
@@ -1090,6 +1096,21 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
return whiteoutFormat
}
+type fileGetNilCloser struct {
+ storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p := d.getDiffPath(id)
+ return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
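A short usage note (not part of the diff): with this change a caller can request a read-only overlay mount by passing "ro" in the mount options, and, when no mount program is configured and only one lower layer exists, the driver returns the lower directory directly instead of mounting. A rough sketch, assuming an initialized graphdriver.Driver; driver and layerID are placeholders.

// Request a read-only view of a layer.
dir, err := driver.Get(layerID, graphdriver.MountOpts{Options: []string{"ro"}})
if err != nil {
	return err
}
defer driver.Put(layerID)
// dir now points at a read-only view of the layer's contents.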
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index f2859b427..679d89112 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
)
var (
@@ -101,6 +102,21 @@ func (d *Driver) Cleanup() error {
return nil
}
+type fileGetNilCloser struct {
+ storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p := d.dir(id)
+ return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index a7742bcdd..a7d9ade60 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -3,25 +3,25 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
- github.com/Microsoft/hcsshim v0.8.7
+ github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.0.0
github.com/klauspost/compress v1.10.5
- github.com/klauspost/pgzip v1.2.3
+ github.com/klauspost/pgzip v1.2.4
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
- github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/opencontainers/selinux v1.5.1
github.com/pkg/errors v0.9.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
- github.com/sirupsen/logrus v1.4.2
+ github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.5.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/vbatts/tar-split v0.11.1
- golang.org/x/net v0.0.0-20190628185345-da137c7871d7
+ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 97076ffa6..eab0fd61e 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -3,14 +3,13 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
@@ -23,46 +22,52 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
-github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
+github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -70,17 +75,17 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
@@ -88,9 +93,6 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -103,22 +105,20 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777 h1:wejkGHRTr38uaKRqECZlsCsJ1/TGxIyFbH32x5zUdu4=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -126,20 +126,26 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index a188c510d..90f196371 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -63,8 +63,6 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
func (pm *PatternMatcher) Matches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
- parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for _, pattern := range pm.patterns {
negative := false
@@ -78,13 +76,6 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) {
return false, err
}
- if !match && parentPath != "." {
- // Check to see if the pattern matches one of our parent dirs.
- if len(pattern.dirs) <= len(parentPathDirs) {
- match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
- }
- }
-
if match {
matched = !negative
}
@@ -122,8 +113,6 @@ func (m *MatchResult) Excludes() uint {
// an error. This method is not safe to be called concurrently.
func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
file = filepath.FromSlash(file)
- parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
res = &MatchResult{false, 0, 0}
for _, pattern := range pm.patterns {
@@ -138,16 +127,6 @@ func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err erro
return nil, err
}
- if !match && parentPath != "." {
- // Check to see if the pattern matches one of our parent dirs.
- if len(pattern.dirs) <= len(parentPathDirs) {
- match, _ = pattern.match(strings.Join(
- parentPathDirs[:len(pattern.dirs)],
- string(os.PathSeparator)),
- )
- }
- }
-
if match {
res.isMatched = !negative
if negative {
@@ -265,8 +244,7 @@ func (p *Pattern) compile() error {
// in golang's filepath.Match
regStr += bs + string(ch)
} else if ch == '\\' {
- // escape next char. Note that a trailing \ in the pattern
- // will be left alone (but need to escape it)
+ // escape next char.
if sl == bs {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
@@ -277,14 +255,14 @@ func (p *Pattern) compile() error {
if scan.Peek() != scanner.EOF {
regStr += bs + string(scan.Next())
} else {
- regStr += bs
+ return filepath.ErrBadPattern
}
} else {
regStr += string(ch)
}
}
- regStr += "$"
+ regStr += "(/.*)?$"
re, err := regexp.Compile(regStr)
if err != nil {
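A small standalone illustration (not part of the diff) of the behavioral intent: the "(/.*)?$" suffix makes a compiled pattern match everything under a matched directory, which the removed parent-path loops in Matches()/MatchesResult() previously did by hand. The file names below are arbitrary examples.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"docs"})
	if err != nil {
		panic(err)
	}
	for _, f := range []string{"docs", "docs/README.md", "src/main.go"} {
		m, err := pm.Matches(f)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-16s matched=%v\n", f, m) // expected: true, true, false
	}
}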
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 6b0f55030..9776b2a12 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"sync"
+ "syscall"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
@@ -26,13 +27,18 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
- if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ st, err := os.Stat(path)
+ if err != nil && os.IsNotExist(err) {
paths = []string{path}
- } else if err == nil && chownExisting {
- // short-circuit--we were called with an existing directory and chown was requested
- return SafeChown(path, ownerUID, ownerGID)
} else if err == nil {
- // nothing to do; directory path fully exists already and chown was NOT requested
+ if !st.IsDir() {
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+ if chownExisting {
+ // short-circuit--we were called with an existing directory and chown was requested
+ return SafeChown(path, ownerUID, ownerGID)
+ }
+ // nothing to do; directory exists and chown was NOT requested
return nil
}
@@ -49,7 +55,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = append(paths, dirPath)
}
}
- if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil {
return err
}
} else {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
index 9c8508397..16be94f44 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -9,7 +9,7 @@ import (
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
- if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index 0df326b03..a55937b49 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -7,26 +7,53 @@ import (
"path/filepath"
)
-// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// AtomicFileWriterOptions specifies options for creating the atomic file writer.
+type AtomicFileWriterOptions struct {
+ // NoSync specifies whether the sync call must be skipped for the file.
+ // If NoSync is not specified, the file is synced to the
+ // storage after it has been written and before it is moved to
+ // the specified path.
+ NoSync bool
+}
+
+var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{}
+
+// SetDefaultOptions overrides the default options used when creating an
+// atomic file writer.
+func SetDefaultOptions(opts AtomicFileWriterOptions) {
+ defaultWriterOptions = opts
+}
+
+// NewAtomicFileWriterWithOpts returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
-func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
-
+ if opts == nil {
+ opts = &defaultWriterOptions
+ }
abspath, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
return &atomicFileWriter{
- f: f,
- fn: abspath,
- perm: perm,
+ f: f,
+ fn: abspath,
+ perm: perm,
+ noSync: opts.NoSync,
}, nil
}
+// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically changes the temporary file to
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ return NewAtomicFileWriterWithOpts(filename, perm, nil)
+}
+
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := NewAtomicFileWriter(filename, perm)
@@ -49,6 +76,7 @@ type atomicFileWriter struct {
fn string
writeErr error
perm os.FileMode
+ noSync bool
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
@@ -65,9 +93,11 @@ func (w *atomicFileWriter) Close() (retErr error) {
os.Remove(w.f.Name())
}
}()
- if err := fdatasync(w.f); err != nil {
- w.f.Close()
- return err
+ if !w.noSync {
+ if err := fdatasync(w.f); err != nil {
+ w.f.Close()
+ return err
+ }
}
if err := w.f.Close(); err != nil {
return err
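A minimal sketch of how a caller might opt out of the pre-rename fdatasync with the API added above, assuming the vendored import path github.com/containers/storage/pkg/ioutils:

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/ioutils"
    )

    func main() {
        // NoSync skips the fdatasync before the rename: faster, but the
        // contents may be lost if the system crashes before they reach disk.
        opts := &ioutils.AtomicFileWriterOptions{NoSync: true}
        w, err := ioutils.NewAtomicFileWriterWithOpts("/tmp/example.json", 0600, opts)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := w.Write([]byte(`{"ok":true}`)); err != nil {
            log.Fatal(err)
        }
        // Close renames the temporary file onto the destination path.
        if err := w.Close(); err != nil {
            log.Fatal(err)
        }
    }

Alternatively, SetDefaultOptions(ioutils.AtomicFileWriterOptions{NoSync: true}) flips the default for every writer created without explicit options.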
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
index 8969191fa..dc7b9d570 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -15,6 +15,9 @@
#include <termios.h>
#include <errno.h>
#include <unistd.h>
+#include <sys/vfs.h>
+#include <sys/mount.h>
+#include <linux/limits.h>
/* Open Source projects like conda-forge want to package podman and are based
off of centos:6. conda-forge has minimal libc requirements and is lacking
@@ -151,16 +154,74 @@ static char **parse_proc_stringlist(const char *list) {
return ret;
}
-static int containers_reexec(void) {
- char **argv, *exename;
+/*
+ * Taken from the runc cloned_binary.c file
+ * Copyright (C) 2019 Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2019 SUSE LLC
+ *
+ * This work is dual licensed under the following licenses. You may use,
+ * redistribute, and/or modify the work under the conditions of either (or
+ * both) licenses.
+ *
+ * === Apache-2.0 ===
+ */
+static int try_bindfd(void)
+{
+ int fd, ret = -1;
+ char src[PATH_MAX] = {0};
+ char template[64] = {0};
+
+ strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1);
+
+ /*
+ * We need somewhere to mount it, mounting anything over /proc/self is a
+ * BAD idea on the host -- even if we do it temporarily.
+ */
+ fd = mkstemp(template);
+ if (fd < 0)
+ return ret;
+ close(fd);
+
+ ret = -EPERM;
+
+ if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0)
+ goto out;
+
+ if (mount(src, template, NULL, MS_BIND, NULL) < 0)
+ goto out;
+ if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0)
+ goto out_umount;
+
+ /* Get read-only handle that we're sure can't be made read-write. */
+ ret = open(template, O_PATH | O_CLOEXEC);
+
+out_umount:
+ /*
+ * Make sure the MNT_DETACH works, otherwise we could get remounted
+ * read-write and that would be quite bad (the fd would be made read-write
+ * too, invalidating the protection).
+ */
+ if (umount2(template, MNT_DETACH) < 0) {
+ if (ret >= 0)
+ close(ret);
+ ret = -ENOTRECOVERABLE;
+ }
+
+out:
+ /*
+ * We don't care about unlink errors, the worst that happens is that
+ * there's an empty file left around in /tmp.
+ */
+ unlink(template);
+ return ret;
+}
+
+static int copy_self_proc_exe(char **argv) {
+ char *exename;
int fd, mmfd, n_read, n_written;
struct stat st;
char buf[2048];
- argv = parse_proc_stringlist("/proc/self/cmdline");
- if (argv == NULL) {
- return -1;
- }
fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
if (fd == -1) {
fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
@@ -168,13 +229,14 @@ static int containers_reexec(void) {
}
if (fstat(fd, &st) == -1) {
fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
+ close(fd);
return -1;
}
exename = basename(argv[0]);
mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
if (mmfd == -1) {
fprintf(stderr, "memfd_create(): %m\n");
- return -1;
+ goto close_fd;
}
for (;;) {
n_read = read(fd, buf, sizeof(buf));
@@ -188,21 +250,45 @@ static int containers_reexec(void) {
n_written = write(mmfd, buf, n_read);
if (n_written < 0) {
fprintf(stderr, "write(anonfd): %m\n");
- return -1;
+ goto close_fd;
}
if (n_written != n_read) {
fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
- return -1;
+ goto close_fd;
}
}
close(fd);
if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
- close(mmfd);
- fprintf(stderr, "Error sealing memfd copy: %m\n");
+ fprintf(stderr, "Close_Fd sealing memfd copy: %m\n");
+ goto close_mmfd;
+ }
+
+ return mmfd;
+
+close_fd:
+ close(fd);
+close_mmfd:
+ close(mmfd);
+ return -1;
+}
+static int containers_reexec(int flags) {
+ char **argv;
+ int fd = -1;
+
+ argv = parse_proc_stringlist("/proc/self/cmdline");
+ if (argv == NULL) {
return -1;
}
- if (fexecve(mmfd, argv, environ) == -1) {
- close(mmfd);
+
+ if (flags & CLONE_NEWNS)
+ fd = try_bindfd();
+ if (fd < 0)
+ fd = copy_self_proc_exe(argv);
+ if (fd < 0)
+ return fd;
+
+ if (fexecve(fd, argv, environ) == -1) {
+ close(fd);
fprintf(stderr, "Error during reexec(...): %m\n");
return -1;
}
@@ -282,7 +368,7 @@ void _containers_unshare(void)
_exit(1);
}
}
- if (containers_reexec() != 0) {
+ if (containers_reexec(flags) != 0) {
_exit(1);
}
return;
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 43b84d769..eaf622f43 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -629,14 +629,14 @@ func GetStore(options StoreOptions) (Store, error) {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
- if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
return nil, err
}
- if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
return nil, err
}
for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
- if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil {
return nil, err
}
}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 18e72efd1..34ff6a77a 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -113,7 +113,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
size = u.Uid
}
if u.Gid > size {
- size = u.Uid
+ size = u.Gid
}
}
}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index 0cdbf14b7..a8e2fbfa8 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -3,7 +3,6 @@ package ocicni
import (
"context"
"encoding/json"
- "errors"
"fmt"
"io/ioutil"
"net"
@@ -62,7 +61,7 @@ type cniNetwork struct {
config *libcni.NetworkConfigList
}
-var errMissingDefaultNetwork = errors.New("Missing CNI default network")
+var errMissingDefaultNetwork = "No CNI configuration file in %s. Has your network provider started?"
type podLock struct {
// Count of in-flight operations for this pod; when this reaches zero
@@ -413,7 +412,7 @@ func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
// to attach the pod to.
func (plugin *cniNetworkPlugin) networksAvailable(podNetwork *PodNetwork) error {
if len(podNetwork.Networks) == 0 && plugin.getDefaultNetwork() == nil {
- return errMissingDefaultNetwork
+ return fmt.Errorf(errMissingDefaultNetwork, plugin.confDir)
}
return nil
}
@@ -504,8 +503,8 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache
var newRt *libcni.RuntimeConf
cniNet, newRt, err = plugin.loadNetworkFromCache(network.Name, rt)
if err != nil {
- logrus.Errorf("error loading cached network config: %v", err)
- // fall back to loading from existing plugins on disk
+ logrus.Debugf("error loading cached network config: %v", err)
+ logrus.Debugf("falling back to loading from existing plugins on disk")
} else {
// Use the updated RuntimeConf
rt = newRt
@@ -854,7 +853,7 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string,
func (plugin *cniNetworkPlugin) Status() error {
if plugin.getDefaultNetwork() == nil {
- return errMissingDefaultNetwork
+ return fmt.Errorf(errMissingDefaultNetwork, plugin.confDir)
}
return nil
}
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 000000000..e810e6fea
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
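A small round-trip sketch against the Buffer shim defined above, exercising only the wire-level helpers shown in this file (vendored as github.com/golang/protobuf/proto):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        b := proto.NewBuffer(nil)
        // Append a varint and a length-prefixed byte string to the buffer.
        _ = b.EncodeVarint(300)
        _ = b.EncodeRawBytes([]byte("hello"))

        // Consume them back, in the same order, from the unread portion.
        v, err := b.DecodeVarint()
        if err != nil {
            panic(err)
        }
        s, err := b.DecodeRawBytes(true)
        if err != nil {
            panic(err)
        }
        fmt.Println(v, string(s)) // 300 hello
    }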
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index 3cd3249f7..000000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "fmt"
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
- in := reflect.ValueOf(src)
- if in.IsNil() {
- return src
- }
- out := reflect.New(in.Type().Elem())
- dst := out.Interface().(Message)
- Merge(dst, src)
- return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
- // Merge merges src into this message.
- // Required and optional fields that are set in src will be set to that value in dst.
- // Elements of repeated fields will be appended.
- //
- // Merge may panic if called with a different argument type than the receiver.
- Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
- XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- if m, ok := dst.(Merger); ok {
- m.Merge(src)
- return
- }
-
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
- }
- if in.IsNil() {
- return // Merge from nil src is a noop
- }
- if m, ok := dst.(generatedMerger); ok {
- m.XXX_Merge(src)
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08be..000000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
- XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
- b := p.buf[p.index:]
- x, y := findEndGroup(b)
- if x < 0 {
- return io.ErrUnexpectedEOF
- }
- err := Unmarshal(b[:x], pb)
- p.index += y
- return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(newUnmarshaler); ok {
- err := u.XXX_Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- // Slow workaround for messages that aren't Unmarshalers.
- // This includes some hand-coded .pb.go files and
- // bootstrap protos.
- // TODO: fix all of those and then add Unmarshal to
- // the Message interface. Then:
- // The cast above and code below can be deleted.
- // The old unmarshaler can be deleted.
- // Clients can call Unmarshal directly (can already do that, actually).
- var info InternalMessageInfo
- err := info.Unmarshal(pb, p.buf[p.index:])
- p.index = len(p.buf)
- return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 000000000..d399bf069
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
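A sketch of the intended call pattern for SetDefaults; pb.Example is a hypothetical proto2-generated message with default= options in its schema, not anything defined by this patch:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/gen/examplepb" // hypothetical generated package
    )

    func main() {
        m := &pb.Example{}
        // Unset scalar fields pick up the defaults declared in the .proto file;
        // fields inside a oneof and already-populated fields are left alone.
        proto.SetDefaults(m)
        fmt.Println(m)
    }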
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
index 35b882c09..e8db57e09 100644
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -1,63 +1,113 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-import "errors"
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
-// Deprecated: do not use.
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
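The deprecated shims above are kept so older generated code keeps compiling; for example, EnumName can still be called directly (a minimal sketch using the vendored package):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        names := map[int32]string{0: "UNKNOWN", 1: "RUNNING"}
        fmt.Println(proto.EnumName(names, 1)) // RUNNING
        fmt.Println(proto.EnumName(names, 7)) // 7 (unknown values fall back to the number)
    }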
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
index dea2617ce..2187e877f 100644
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -1,48 +1,13 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
-type generatedDiscarder interface {
- XXX_DiscardUnknown()
-}
-
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
- if m, ok := m.(generatedDiscarder); ok {
- m.XXX_DiscardUnknown()
- return
- }
- // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
- // but the master branch has no implementation for InternalMessageInfo,
- // so it would be more work to replicate that approach.
- discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
- di := atomicLoadDiscardInfo(&a.discard)
- if di == nil {
- di = getDiscardInfo(reflect.TypeOf(m).Elem())
- atomicStoreDiscardInfo(&a.discard, di)
- }
- di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []discardFieldInfo
- unrecognized field
-}
-
-type discardFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
- discard func(src pointer)
-}
-
-var (
- discardInfoMap = map[reflect.Type]*discardInfo{}
- discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
- discardInfoLock.Lock()
- defer discardInfoLock.Unlock()
- di := discardInfoMap[t]
- if di == nil {
- di = &discardInfo{typ: t}
- discardInfoMap[t] = di
+ if m != nil {
+ discardUnknown(MessageReflect(m))
}
- return di
}
-func (di *discardInfo) discard(src pointer) {
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&di.initialized) == 0 {
- di.computeDiscardInfo()
- }
-
- for _, fi := range di.fields {
- sfp := src.offset(fi.field)
- fi.discard(sfp)
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
- // Ignore lock since DiscardUnknown is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- DiscardUnknown(m)
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
}
- }
- }
-
- if di.unrecognized.IsValid() {
- *src.offset(di.unrecognized).toBytes() = nil
- }
-}
-
-func (di *discardInfo) computeDiscardInfo() {
- di.lock.Lock()
- defer di.lock.Unlock()
- if di.initialized != 0 {
- return
- }
- t := di.typ
- n := t.NumField()
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- dfi := discardFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
- case isSlice: // E.g., []*pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sps := src.getPointerSlice()
- for _, sp := range sps {
- if !sp.isNil() {
- di.discard(sp)
- }
- }
- }
- default: // E.g., *pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- di.discard(sp)
- }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
}
}
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
- default: // E.g., map[K]V
- if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
- dfi.discard = func(src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- DiscardUnknown(val.Interface().(Message))
- }
- }
- } else {
- dfi.discard = func(pointer) {} // Noop
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
- default: // E.g., interface{}
- // TODO: Make this faster?
- dfi.discard = func(src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- DiscardUnknown(sv.Interface().(Message))
- }
- }
- }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
}
- default:
- continue
- }
- di.fields = append(di.fields, dfi)
- }
-
- di.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- di.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
- v := reflect.ValueOf(m)
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return
- }
- t := v.Type()
-
- for i := 0; i < v.NumField(); i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
}
- vf := v.Field(i)
- tf := f.Type
+ return true
+ })
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
- case isSlice: // E.g., []*pb.T
- for j := 0; j < vf.Len(); j++ {
- discardLegacy(vf.Index(j).Interface().(Message))
- }
- default: // E.g., *pb.T
- discardLegacy(vf.Interface().(Message))
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
- default: // E.g., map[K]V
- tv := vf.Type().Elem()
- if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
- for _, key := range vf.MapKeys() {
- val := vf.MapIndex(key)
- discardLegacy(val.Interface().(Message))
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
- default: // E.g., test_proto.isCommunique_Union interface
- if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
- vf = vf.Elem() // E.g., *test_proto.Communique_Msg
- if !vf.IsNil() {
- vf = vf.Elem() // E.g., test_proto.Communique_Msg
- vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
- if vf.Kind() == reflect.Ptr {
- discardLegacy(vf.Interface().(Message))
- }
- }
- }
- }
- }
- }
-
- if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
- if vf.Type() != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- vf.Set(reflect.ValueOf([]byte(nil)))
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(m); err == nil {
- // Ignore lock since discardLegacy is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- discardLegacy(m)
- }
- }
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
}
}
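Note: the rewritten discard path above walks the message through protoreflect and finishes by dropping unknown bytes with SetUnknown(nil). A minimal sketch of exercising it from application code follows; the well-known emptypb.Empty type and the field number 1000 are illustrative choices, not part of this change:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
		"google.golang.org/protobuf/encoding/protowire"
		"google.golang.org/protobuf/types/known/emptypb"
	)

	func main() {
		m := &emptypb.Empty{}

		// Attach an unknown varint field (number 1000, value 42) by hand.
		var unk []byte
		unk = protowire.AppendTag(unk, 1000, protowire.VarintType)
		unk = protowire.AppendVarint(unk, 42)
		m.ProtoReflect().SetUnknown(unk)

		fmt.Println(len(m.ProtoReflect().GetUnknown())) // 3 (2-byte tag + 1-byte value)
		proto.DiscardUnknown(m)                         // clears the unknown bytes
		fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
	}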
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index 3abfed2cf..000000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "reflect"
-)
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // ErrTooLarge is the error returned if Marshal is called with a
- // message that encodes to >2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- switch {
- case x < 1<<7:
- return 1
- case x < 1<<14:
- return 2
- case x < 1<<21:
- return 3
- case x < 1<<28:
- return 4
- case x < 1<<35:
- return 5
- case x < 1<<42:
- return 6
- case x < 1<<49:
- return 7
- case x < 1<<56:
- return 8
- case x < 1<<63:
- return 9
- }
- return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- siz := Size(pb)
- p.EncodeVarint(uint64(siz))
- return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
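Note: the deleted Buffer encoders above (EncodeVarint, EncodeFixed64, EncodeZigzag64, and friends) largely have counterparts in the google.golang.org/protobuf/encoding/protowire package that this change imports elsewhere. A small sketch of the same varint wire format using that package; the value is chosen only for illustration:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/encoding/protowire"
	)

	func main() {
		// 300 needs two varint bytes: 0xAC 0x02 (low 7 bits first, MSB as continuation).
		b := protowire.AppendVarint(nil, 300)
		fmt.Printf("% x (size %d)\n", b, protowire.SizeVarint(300)) // ac 02 (size 2)
	}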
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index f9b6e41b3..000000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal. Note a "bytes" field,
- although represented by []byte, is not a repeated field and the
- rule for the scalar fields described above applies.
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Two map fields are equal iff their lengths are the same,
- and they contain the same set of elements. Zero-length map
- fields are equal.
- - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_InternalExtensions")
- if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- // Maps may have nil values in them, so check for nil.
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
- em1, _ := x1.extensionsRead()
- em2, _ := x2.extensionsRead()
- return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1 := extensionAsLegacyType(e1.value)
- m2 := extensionAsLegacyType(e2.value)
-
- if m1 == nil && m2 == nil {
- // Both have only encoded form.
- if bytes.Equal(e1.enc, e2.enc) {
- continue
- }
- // The bytes are different, but the extensions might still be
- // equal. We need to decode them to compare.
- }
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- // If both have only encoded form and the bytes are the same,
- // it is handled above. We get here when the bytes are different.
- // We don't know how to decode it, so just compare them as byte
- // slices.
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- return false
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
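Note: the equality semantics documented in the deleted Equal above (same concrete type, equal fields, equal unknown field sets and extension sets) are provided by the protobuf-go v2 module that this vendoring change pulls in. A minimal sketch using google.golang.org/protobuf/proto; the wrapper messages are just convenient well-known types for the example:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		a := wrapperspb.String("hello")
		b := wrapperspb.String("hello")
		c := wrapperspb.String("world")

		fmt.Println(proto.Equal(a, b)) // true: same type, equal field values
		fmt.Println(proto.Equal(a, c)) // false: the string fields differ
	}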
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index fa88add30..42fc120c9 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -1,607 +1,356 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
import (
"errors"
"fmt"
- "io"
"reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- extensionsWrite() map[int32]Extension
- extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
- extendableProtoV1
-}
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
- return e.ExtensionMap()
-}
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- return e.ExtensionMap(), notLocker{}
-}
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
-func (n notLocker) Lock() {}
-func (n notLocker) Unlock() {}
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
- switch p := p.(type) {
- case extendableProto:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return p, nil
- case extendableProtoV1:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return extensionAdapter{p}, nil
- }
- // Don't allocate a specific error containing %T:
- // this is the hot path for Clone and MarshalText.
- return nil, errNotExtendable
-}
+// ErrMissingExtension is the error returned when an extension is not present in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-func isNilPtr(x interface{}) bool {
- v := reflect.ValueOf(x)
- return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
- // The struct must be indirect so that if a user inadvertently copies a
- // generated message and its embedded XXX_InternalExtensions, they
- // avoid the mayhem of a copied mutex.
- //
- // The mutex serializes all logically read-only operations to p.extensionMap.
- // It is up to the client to ensure that write operations to p.extensionMap are
- // mutually exclusive with other accesses.
- p *struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
}
-}
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
- if e.p == nil {
- e.p = new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
})
- e.p.extensionMap = make(map[int32]Extension)
}
- return e.p.extensionMap
-}
-// extensionsRead returns the extensions map for read-only use. It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
- if e.p == nil {
- return nil, nil
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
}
- return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
- Filename string // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
-
- // value is a concrete value for the extension field. Let the type of
- // desc.ExtensionType be the "API type" and the type of Extension.value
- // be the "storage type". The API type and storage type are the same except:
- // * For scalars (except []byte), the API type uses *T,
- // while the storage type uses T.
- // * For repeated fields, the API type uses []T, while the storage type
- // uses *[]T.
- //
- // The reason for the divergence is so that the storage type more naturally
- // matches what is expected of when retrieving the values through the
- // protobuf reflection APIs.
- //
- // The value may only be populated if desc is also populated.
- value interface{}
-
- // enc is the raw bytes for the extension field.
- enc []byte
+ return has
}
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
- epb, err := extendable(base)
- if err != nil {
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- extmap := epb.extensionsWrite()
- extmap[id] = Extension{enc: b}
-}
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- var pbi interface{} = pb
- // Check the extended type.
- if ea, ok := pbi.(extensionAdapter); ok {
- pbi = ea.extendableProtoV1
- }
- if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
- // TODO: Check types, field numbers, etc.?
- epb, err := extendable(pb)
- if err != nil {
- return false
- }
- extmap, mu := epb.extensionsRead()
- if extmap == nil {
- return false
+ })
}
- mu.Lock()
- _, ok := extmap[extension.Field]
- mu.Unlock()
- return ok
+ clearUnknown(mr, fieldNum(xt.Field))
}
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, err := extendable(pb)
- if err != nil {
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- // TODO: Check types, field numbers, etc.?
- extmap := epb.extensionsWrite()
- delete(extmap, extension.Field)
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
-// GetExtension retrieves a proto2 extended field from pb.
+// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
-
- if extension.ExtendedType != nil {
- // can only check type if this is a complete descriptor
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return defaultExtensionValue(extension)
- }
- mu.Lock()
- defer mu.Unlock()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return extensionAsLegacyType(e.value), nil
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
}
- if extension.ExtensionType == nil {
- // incomplete descriptor
- return e.enc, nil
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
}
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = extensionAsStorageType(v)
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return extensionAsLegacyType(e.value), nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- if extension.ExtensionType == nil {
- // incomplete descriptor, so no default
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
return nil, ErrMissingExtension
}
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
}
+ return v, nil
+}
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non int32 reflect.value directly
- // set it as a int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
}
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- unmarshal := typeUnmarshaler(t, extension.Tag)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate space to store the pointer/slice.
- value := reflect.New(t).Elem()
-
- var err error
- for {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- wire := int(x) & 7
-
- b, err = unmarshal(b, valToPointer(value.Addr()), wire)
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- break
- }
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// GetExtensions returns a list of the extensions values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
}
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
if err != nil {
- return
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
}
+ vs[i] = v
}
- return
+ return vs, nil
}
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
}
- registeredExtensions := RegisteredExtensions(pb)
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
}
-
- extensions = append(extensions, desc)
}
- return extensions, nil
-}
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, err := extendable(pb)
- if err != nil {
- return err
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
return nil
}
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- epb, err := extendable(pb)
- if err != nil {
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
}
-}
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
}
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
}
- m[desc.Field] = desc
+ return xts, nil
}
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
-// extensionAsLegacyType converts an value in the storage type as the API type.
-// See Extension.value.
-func extensionAsLegacyType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- // Represent primitive types as a pointer to the value.
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Slice:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
+ return true
+ default:
+ return false
}
- return v
}
-// extensionAsStorageType converts an value in the API type as the storage type.
-// See Extension.value.
-func extensionAsStorageType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
- case reflect.Slice:
- // Represent slice types as a pointer to the value.
- if rv.Type().Elem().Kind() != reflect.Uint8 {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- return v
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
}
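Note: the rewritten GetExtension above explicitly supports type-incomplete descriptors; when ExtensionDesc.ExtensionType is nil it returns the raw encoded bytes collected from the unknown fields. A small sketch of that path follows; descriptorpb.MessageOptions is used only because it declares an extension range, and the field number 50000 and value are illustrative:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
		"google.golang.org/protobuf/encoding/protowire"
		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		// MessageOptions declares "extensions 1000 to max", so it is extendable.
		opts := &descriptorpb.MessageOptions{}

		// Store an extension purely as an unknown field: number 50000, varint value 7.
		var raw []byte
		raw = protowire.AppendTag(raw, 50000, protowire.VarintType)
		raw = protowire.AppendVarint(raw, 7)
		opts.ProtoReflect().SetUnknown(raw)

		// A type-incomplete descriptor (only Field populated) yields the raw bytes.
		xt := &proto.ExtensionDesc{Field: 50000}
		v, err := proto.GetExtension(opts, xt)
		fmt.Printf("raw=% x err=%v\n", v, err)
	}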
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index fdd328bb7..000000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,965 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and a Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/golang/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
- if e.field == "" {
- return fmt.Sprintf("proto: required field not set")
- }
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
- if e.field == "" {
- return "proto: invalid UTF-8 detected"
- }
- return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
- return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or an InvalidUTF8 error.
-func isNonFatal(err error) bool {
- if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
- return true
- }
- if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
- return true
- }
- return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether it was merged successfully:
-// nil and non-fatal errors return true; fatal errors are not merged and return false.
-func (nf *nonFatal) Merge(err error) (ok bool) {
- if err == nil {
- return true // not an error
- }
- if !isNonFatal(err) {
- return false // fatal error
- }
- if nf.E == nil {
- nf.E = err // store first instance of non-fatal error
- }
- return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
- p.deterministic = deterministic
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- index := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- v = v.Elem()
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or []*T or map[T]*T
- switch f.Kind() {
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{vs: vs}
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- case reflect.Bool:
- s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
- case reflect.String:
- s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
- default:
- panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-const (
- // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion3 = true
-
- // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion2 = true
-
- // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
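
For orientation, a minimal sketch of assumed usage (not taken from this change) of the Buffer, SetDeterministic, and SetDefaults APIs documented in the deleted lib.go above; pb.Test stands in for the hypothetical generated type shown in the package comment, and the import path of the generated package is made up for illustration.

	package main

	import (
		"log"

		"github.com/golang/protobuf/proto"
		pb "example.com/example" // hypothetical generated package
	)

	func main() {
		msg := &pb.Test{Label: proto.String("hello")}

		// SetDefaults fills in unset optional fields that declare explicit
		// defaults (Test.Type carries def=77 in its struct tag), so msg.Type
		// becomes a non-nil pointer to 77.
		proto.SetDefaults(msg)

		// Reuse a single Buffer across marshal calls to limit allocations,
		// and request deterministic (sorted-map-key) output.
		var buf proto.Buffer
		buf.SetDeterministic(true)
		if err := buf.Marshal(msg); err != nil {
			log.Fatal("marshaling error: ", err)
		}
		data := buf.Bytes()
		_ = data
	}
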
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index f48a75676..000000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
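
As an aside, a small self-contained sketch of the tag-and-length framing that unmarshalMessageSet rebuilds above, using only the exported EncodeVarint/DecodeVarint helpers and the WireBytes constant from this package; the type ID 100 and the payload are arbitrary illustrative values.

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
	)

	func main() {
		const typeID = 100                    // arbitrary message-set type ID
		payload := []byte("encoded message")  // stands in for item.Message

		// Key = (field number << 3) | wire type, then a length varint, then the bytes.
		frame := proto.EncodeVarint(uint64(typeID)<<3 | proto.WireBytes)
		frame = append(frame, proto.EncodeVarint(uint64(len(payload)))...)
		frame = append(frame, payload...)

		key, n := proto.DecodeVarint(frame)
		fmt.Println(key>>3, key&7, len(frame)-n) // field number, wire type, remaining bytes
	}
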
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index 94fa9194a..000000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
- "sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
- v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
- v := reflect.ValueOf(*i)
- u := reflect.New(v.Type())
- u.Elem().Set(v)
- if deref {
- u = u.Elem()
- }
- return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
- return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
- n, m := s.Len(), s.Cap()
- if n < m {
- s.SetLen(n + 1)
- } else {
- s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
- }
- return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
- return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
- return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return p.v.Interface().(**int32)
-}
- func (p pointer) toInt32Slice() *[]int32 {
- return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().(*int32)
- }
- // an enum
- return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
- // Allocate value in a *int32. Possibly convert that to a *enum.
- // Then assign it to a **int32 or **enum.
- // Note: we can convert *int32 to *enum, but we can't convert
- // **int32 to **enum!
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().([]int32)
- }
- // an enum
- // Allocate a []int32, then assign []enum's values into it.
- // Note: we can't convert []enum to []int32.
- slice := p.v.Elem()
- s := make([]int32, slice.Len())
- for i := 0; i < slice.Len(); i++ {
- s[i] = int32(slice.Index(i).Int())
- }
- return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- p.v.Elem().Set(reflect.ValueOf(v))
- return
- }
- // an enum
- // Allocate a []enum, then assign []int32's values into it.
- // Note: we can't convert []enum to []int32.
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
- for i, x := range v {
- slice.Index(i).SetInt(int64(x))
- }
- p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
- grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
- return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
- return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
- return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
- return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
- return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
- return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
- return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
- return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
- return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
- return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
- return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
- p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
- grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
- if v == nil {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
- return
- }
- s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
- for _, p := range v {
- s = reflect.Append(s, p.v)
- }
- p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- if p.v.Elem().IsNil() {
- return pointer{v: p.v.Elem()}
- }
- return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- // TODO: check that p.v.Type().Elem() == t?
- return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-
-var atomicLock sync.Mutex
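
A brief standalone sketch (independent of this package; the enum type is a stand-in for a generated protobuf enum) of the reflect conversion rule that setInt32Ptr in the deleted pointer_reflect.go relies on: a *int32 converts to a pointer-to-enum, but a **int32 does not convert to **enum.

	package main

	import (
		"fmt"
		"reflect"
	)

	type enum int32 // stands in for a generated protobuf enum type

	func main() {
		v := int32(7)

		// *int32 -> *enum is a legal conversion: both are unnamed pointer
		// types whose element types share the underlying type int32.
		pe := reflect.ValueOf(&v).Convert(reflect.TypeOf((*enum)(nil)))
		fmt.Println(*pe.Interface().(*enum)) // 7

		// **int32 -> **enum is not convertible, since the element types
		// *int32 and *enum are distinct. That is why setInt32Ptr allocates a
		// *int32 first and converts that single level of pointer.
		pp := &v
		fmt.Println(reflect.TypeOf(&pp).ConvertibleTo(reflect.TypeOf((**enum)(nil)))) // false
	}
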
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index dbfffe071..000000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "sync/atomic"
- "unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
- p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- // Super-tricky - read pointer out of data word of interface value.
- // Saves ~25ns over the equivalent:
- // return valToPointer(reflect.ValueOf(*i))
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
- // Super-tricky - read or get the address of data word of interface value.
- if isptr {
- // The interface is of pointer type, thus it is a direct interface.
- // The data word is the pointer data itself. We take its address.
- p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
- } else {
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
- }
- if deref {
- p.p = *(*unsafe.Pointer)(p.p)
- }
- return p
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- // For safety, we should panic if !f.IsValid, however calling panic causes
- // this to no longer be inlineable, which is a serious performance cost.
- /*
- if !f.IsValid() {
- panic("invalid field")
- }
- */
- return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
- return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
- return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
- return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return (**int32)(p.p)
- }
- func (p pointer) toInt32Slice() *[]int32 {
- return (*[]int32)(p.p)
- }
-*/
-func (p pointer) getInt32Ptr() *int32 {
- return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
- *(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
- return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
- *(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
- s := (*[]int32)(p.p)
- *s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
- return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
- return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
- return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
- return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
- return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
- return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
- return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
- return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
- return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
- return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We load it as []pointer.
- return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We store it as []pointer.
- *(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
- return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
- *(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
- s := (*[]unsafe.Pointer)(p.p)
- *s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- // Super-tricky - read pointer out of data word of interface value.
- return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index a4b8c0cd3..dcdc2202f 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -1,162 +1,104 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
import (
"fmt"
- "log"
"reflect"
- "sort"
"strconv"
"strings"
"sync"
-)
-
-const debug bool = false
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
)
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
+ // It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
WireType int
- Tag int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
Required bool
+	// Optional reports whether this is an optional field.
Optional bool
+ // Repeated reports whether this is a repeated field.
Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
-
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
- mtype reflect.Type // set for map types only
- MapKeyProp *Properties // set for map types only
- MapValProp *Properties // set for map types only
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
+ s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
@@ -170,18 +112,21 @@ func (p *Properties) String() string {
s += ",packed"
}
s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
+ if p.JSONName != "" {
s += ",json=" + p.JSONName
}
- if p.proto3 {
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
s += ",proto3"
}
- if p.oneof {
+ if p.Oneof {
s += ",oneof"
}
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
if p.HasDefault {
s += ",def=" + p.Default
}
@@ -189,356 +134,173 @@ func (p *Properties) String() string {
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- log.Printf("proto: tag has too few fields: %q", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- case "fixed32":
- p.WireType = WireFixed32
- case "fixed64":
- p.WireType = WireFixed64
- case "zigzag32":
- p.WireType = WireVarint
- case "zigzag64":
- p.WireType = WireVarint
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- log.Printf("proto: tag has unknown wire type: %q", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
-outer:
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
p.Optional = true
- case f == "rep":
+ case s == "req":
+ p.Required = true
+ case s == "rep":
p.Repeated = true
- case f == "packed":
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break outer
- }
- }
- }
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- switch t1 := typ; t1.Kind() {
- case reflect.Ptr:
- if t1.Elem().Kind() == reflect.Struct {
- p.stype = t1.Elem()
- }
-
- case reflect.Slice:
- if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
- p.stype = t2.Elem()
- }
-
- case reflect.Map:
- p.mtype = t1
- p.MapKeyProp = &Properties{}
- p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.MapValProp = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
- p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
-
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
+ p.Default, i = tag[len("def="):], len(tag)
}
+ tag = strings.TrimPrefix(tag[i:], ",")
}
}
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
- p.setFieldProps(typ, f, lockGetProp)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
}
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- return sprop
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
}
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
}
-type (
- oneofFuncsIface interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- oneofWrappersIface interface {
- XXX_OneofWrappers() []interface{}
- }
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- return prop
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
+ var hasOneof bool
prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
+ // Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
}
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
+
+ prop.Prop = append(prop.Prop, p)
}
- // Re-order prop.order.
- sort.Sort(prop)
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
- var oots []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oots = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oots = m.XXX_OneofWrappers()
- }
- if len(oots) > 0 {
- // Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
}
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
}
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
}
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
}
- prop.reqCount = reqCount
return prop
}
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
- protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypedNils[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
- // Generated code always calls RegisterType with nil x.
- // This check is just for extra safety.
- protoTypedNils[name] = x
- } else {
- protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
- }
- revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
- if reflect.TypeOf(x).Kind() != reflect.Map {
- panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
- }
- if _, ok := protoMapTypes[name]; ok {
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoMapTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
- if t, ok := protoTypedNils[name]; ok {
- return reflect.TypeOf(t)
- }
- return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
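For reference, a minimal sketch (not part of this diff) of how the rewritten tag parser above behaves when driven through the deprecated github.com/golang/protobuf/proto API; the tag string and printed fields are illustrative assumptions only:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto" // deprecated v1 shim whose Properties type is shown above
)

func main() {
	// Parse a generated struct tag of the form handled by Properties.Parse above.
	p := new(proto.Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	// Wire and Tag come from the "bytes" and "49" entries; "def=" consumes the
	// remainder of the tag, commas included.
	fmt.Println(p.Wire, p.Tag, p.OrigName, p.HasDefault, p.Default)
	// Expected output: bytes 49 foo true hello!
}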
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 000000000..5aee89c32
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+ // The provided buffer is only valid for the duration of the method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
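A minimal sketch (not part of this diff) of the new proto.go entry points; it assumes the well-known wrappers package purely as an example generated message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"                  // deprecated shim added above
	"google.golang.org/protobuf/types/known/wrapperspb" // example message type (assumption)
)

func main() {
	src := wrapperspb.String("hello")

	// Clone, Equal, and Merge now forward to google.golang.org/protobuf/proto.
	dst := proto.Clone(src).(*wrapperspb.StringValue)
	fmt.Println(proto.Equal(src, dst)) // true

	// Merge overwrites populated scalar fields in dst with those from src.
	proto.Merge(dst, wrapperspb.String("world"))
	fmt.Println(dst.GetValue()) // world
}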
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 000000000..1e7ff6420
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,323 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
+ b = fd.ProtoLegacyRawDesc()
+ } else {
+ // TODO: Use protodesc.ToFileDescriptorProto to construct
+ // a descriptorpb.FileDescriptorProto and marshal it.
+ // However, doing so causes the proto package to have a dependency
+ // on descriptorpb, leading to cyclic dependency issues.
+ }
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package that the
+// enum is declared within followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
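A minimal sketch (not part of this diff) of the legacy registry lookups now backed by the v2 global registries; the well-known StringValue type is used only for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/known/wrapperspb" // registers google.protobuf.StringValue
)

func main() {
	// MessageType falls back to protoregistry.GlobalTypes when the local
	// cache has no entry, as implemented above.
	t := proto.MessageType("google.protobuf.StringValue")
	fmt.Println(t) // *wrapperspb.StringValue
}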
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
deleted file mode 100644
index 5cb11fa95..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,2776 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
- typ reflect.Type
- fields []*marshalFieldInfo
- unrecognized field // offset of XXX_unrecognized
- extensions field // offset of XXX_InternalExtensions
- v1extensions field // offset of XXX_extensions
- sizecache field // offset of XXX_sizecache
- initialized int32 // 0 -- only typ is set, 1 -- fully initialized
- messageset bool // uses message set wire format
- hasmarshaler bool // has custom marshaler
- sync.RWMutex // protect extElems map, also for initialization
- extElems map[int32]*marshalElemInfo // info of extension elements
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
- field field
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isPointer bool
- required bool // field is required
- name string // name of the field, for error reporting
- oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
- deref bool // dereference the pointer before operating on it; implies isptr
-}
-
-var (
- marshalInfoMap = map[reflect.Type]*marshalInfo{}
- marshalInfoLock sync.Mutex
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
- marshalInfoLock.Lock()
- u, ok := marshalInfoMap[t]
- if !ok {
- u = &marshalInfo{typ: t}
- marshalInfoMap[t] = u
- }
- marshalInfoLock.Unlock()
- return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return 0
- }
- return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return b, ErrNil
- }
- return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
- // u := a.marshal, but atomically.
- // We use an atomic here to ensure memory consistency.
- u := atomicLoadMarshalInfo(&a.marshal)
- if u == nil {
- // Get marshal information from type of message.
- t := reflect.ValueOf(msg).Type()
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
- }
- u = getMarshalInfo(t.Elem())
- // Store it in the cache for later users.
- // a.marshal = u, but atomically.
- atomicStoreMarshalInfo(&a.marshal, u)
- }
- return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b, _ := m.Marshal()
- return len(b)
- }
-
- n := 0
- for _, f := range u.fields {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- n += f.sizer(ptr.offset(f.field), f.tagsize)
- }
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- n += u.sizeMessageSet(e)
- } else {
- n += u.sizeExtensions(e)
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- n += u.sizeV1Extensions(m)
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- n += len(s)
- }
- // cache the result for use in marshal
- if u.sizecache.IsValid() {
- atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
- }
- return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
- if u.sizecache.IsValid() {
- return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
- }
- return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, returns the slice and error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b1, err := m.Marshal()
- b = append(b, b1...)
- return b, err
- }
-
- var err, errLater error
- // The old marshaler encodes extensions at beginning.
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- b, err = u.appendMessageSet(b, e, deterministic)
- } else {
- b, err = u.appendExtensions(b, e, deterministic)
- }
- if err != nil {
- return b, err
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- b, err = u.appendV1Extensions(b, m, deterministic)
- if err != nil {
- return b, err
- }
- }
- for _, f := range u.fields {
- if f.required {
- if ptr.offset(f.field).getPointer().isNil() {
- // Required field is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name}
- }
- continue
- }
- }
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
- if err != nil {
- if err1, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name + "." + err1.field}
- }
- continue
- }
- if err == errRepeatedHasNil {
- err = errors.New("proto: repeated field " + f.name + " has nil element")
- }
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return b, err
- }
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- b = append(b, s...)
- }
- return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
- u.Lock()
- defer u.Unlock()
- if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
- return
- }
-
- t := u.typ
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.v1extensions = invalidField
- u.sizecache = invalidField
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if reflect.PtrTo(t).Implements(marshalerType) {
- u.hasmarshaler = true
- atomic.StoreInt32(&u.initialized, 1)
- return
- }
-
- // get oneof implementers
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
-
- n := t.NumField()
-
- // deal with XXX fields first
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if !strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- switch f.Name {
- case "XXX_sizecache":
- u.sizecache = toField(&f)
- case "XXX_unrecognized":
- u.unrecognized = toField(&f)
- case "XXX_InternalExtensions":
- u.extensions = toField(&f)
- u.messageset = f.Tag.Get("protobuf_messageset") == "1"
- case "XXX_extensions":
- u.v1extensions = toField(&f)
- case "XXX_NoUnkeyedLiteral":
- // nothing to do
- default:
- panic("unknown XXX field: " + f.Name)
- }
- n--
- }
-
- // normal fields
- fields := make([]marshalFieldInfo, n) // batch allocation
- u.fields = make([]*marshalFieldInfo, 0, n)
- for i, j := 0, 0; i < t.NumField(); i++ {
- f := t.Field(i)
-
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- field := &fields[j]
- j++
- field.name = f.Name
- u.fields = append(u.fields, field)
- if f.Tag.Get("protobuf_oneof") != "" {
- field.computeOneofFieldInfo(&f, oneofImplementers)
- continue
- }
- if f.Tag.Get("protobuf") == "" {
- // field has no tag (not in generated message), ignore it
- u.fields = u.fields[:len(u.fields)-1]
- j--
- continue
- }
- field.computeMarshalFieldInfo(&f)
- }
-
- // fields are marshaled in tag order on the wire.
- sort.Sort(byTag(u.fields))
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int { return len(a) }
-func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
- // get from cache first
- u.RLock()
- e, ok := u.extElems[desc.Field]
- u.RUnlock()
- if ok {
- return e
- }
-
- t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
- tags := strings.Split(desc.Tag, ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
- t = t.Elem()
- }
- sizer, marshaler := typeMarshaler(t, tags, false, false)
- var deref bool
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- t = reflect.PtrTo(t)
- deref = true
- }
- e = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- isptr: t.Kind() == reflect.Ptr,
- deref: deref,
- }
-
- // update cache
- u.Lock()
- if u.extElems == nil {
- u.extElems = make(map[int32]*marshalElemInfo)
- }
- u.extElems[desc.Field] = e
- u.Unlock()
- return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
- // parse protobuf tag of the field.
- // tag has format of "bytes,49,opt,name=foo,def=hello!"
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- if tags[0] == "" {
- return
- }
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if tags[2] == "req" {
- fi.required = true
- }
- fi.setTag(f, tag, wt)
- fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
- fi.field = toField(f)
- fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
- fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
- ityp := f.Type // interface type
- for _, o := range oneofImplementers {
- t := reflect.TypeOf(o)
- if !t.Implements(ityp) {
- continue
- }
- sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
- tags := strings.Split(sf.Tag.Get("protobuf"), ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
- fi.oneofElems[t.Elem()] = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- }
- }
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
- switch encoding {
- case "fixed32":
- return WireFixed32
- case "fixed64":
- return WireFixed64
- case "varint", "zigzag32", "zigzag64":
- return WireVarint
- case "bytes":
- return WireBytes
- case "group":
- return WireStartGroup
- }
- panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
- fi.field = toField(f)
- fi.wiretag = uint64(tag)<<3 | wt
- fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
- switch f.Type.Kind() {
- case reflect.Map:
- // map field
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeMapMarshaler(f)
- return
- case reflect.Ptr, reflect.Slice:
- fi.isPointer = true
- }
- fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
- encoding := tags[0]
-
- pointer := false
- slice := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- packed := false
- proto3 := false
- validateUTF8 := true
- for i := 2; i < len(tags); i++ {
- if tags[i] == "packed" {
- packed = true
- }
- if tags[i] == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return sizeBoolPtr, appendBoolPtr
- }
- if slice {
- if packed {
- return sizeBoolPackedSlice, appendBoolPackedSlice
- }
- return sizeBoolSlice, appendBoolSlice
- }
- if nozero {
- return sizeBoolValueNoZero, appendBoolValueNoZero
- }
- return sizeBoolValue, appendBoolValue
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixed32Ptr, appendFixed32Ptr
- }
- if slice {
- if packed {
- return sizeFixed32PackedSlice, appendFixed32PackedSlice
- }
- return sizeFixed32Slice, appendFixed32Slice
- }
- if nozero {
- return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
- }
- return sizeFixed32Value, appendFixed32Value
- case "varint":
- if pointer {
- return sizeVarint32Ptr, appendVarint32Ptr
- }
- if slice {
- if packed {
- return sizeVarint32PackedSlice, appendVarint32PackedSlice
- }
- return sizeVarint32Slice, appendVarint32Slice
- }
- if nozero {
- return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
- }
- return sizeVarint32Value, appendVarint32Value
- }
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixedS32Ptr, appendFixedS32Ptr
- }
- if slice {
- if packed {
- return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
- }
- return sizeFixedS32Slice, appendFixedS32Slice
- }
- if nozero {
- return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
- }
- return sizeFixedS32Value, appendFixedS32Value
- case "varint":
- if pointer {
- return sizeVarintS32Ptr, appendVarintS32Ptr
- }
- if slice {
- if packed {
- return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
- }
- return sizeVarintS32Slice, appendVarintS32Slice
- }
- if nozero {
- return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
- }
- return sizeVarintS32Value, appendVarintS32Value
- case "zigzag32":
- if pointer {
- return sizeZigzag32Ptr, appendZigzag32Ptr
- }
- if slice {
- if packed {
- return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
- }
- return sizeZigzag32Slice, appendZigzag32Slice
- }
- if nozero {
- return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
- }
- return sizeZigzag32Value, appendZigzag32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixed64Ptr, appendFixed64Ptr
- }
- if slice {
- if packed {
- return sizeFixed64PackedSlice, appendFixed64PackedSlice
- }
- return sizeFixed64Slice, appendFixed64Slice
- }
- if nozero {
- return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
- }
- return sizeFixed64Value, appendFixed64Value
- case "varint":
- if pointer {
- return sizeVarint64Ptr, appendVarint64Ptr
- }
- if slice {
- if packed {
- return sizeVarint64PackedSlice, appendVarint64PackedSlice
- }
- return sizeVarint64Slice, appendVarint64Slice
- }
- if nozero {
- return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
- }
- return sizeVarint64Value, appendVarint64Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixedS64Ptr, appendFixedS64Ptr
- }
- if slice {
- if packed {
- return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
- }
- return sizeFixedS64Slice, appendFixedS64Slice
- }
- if nozero {
- return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
- }
- return sizeFixedS64Value, appendFixedS64Value
- case "varint":
- if pointer {
- return sizeVarintS64Ptr, appendVarintS64Ptr
- }
- if slice {
- if packed {
- return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
- }
- return sizeVarintS64Slice, appendVarintS64Slice
- }
- if nozero {
- return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
- }
- return sizeVarintS64Value, appendVarintS64Value
- case "zigzag64":
- if pointer {
- return sizeZigzag64Ptr, appendZigzag64Ptr
- }
- if slice {
- if packed {
- return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
- }
- return sizeZigzag64Slice, appendZigzag64Slice
- }
- if nozero {
- return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
- }
- return sizeZigzag64Value, appendZigzag64Value
- }
- case reflect.Float32:
- if pointer {
- return sizeFloat32Ptr, appendFloat32Ptr
- }
- if slice {
- if packed {
- return sizeFloat32PackedSlice, appendFloat32PackedSlice
- }
- return sizeFloat32Slice, appendFloat32Slice
- }
- if nozero {
- return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
- }
- return sizeFloat32Value, appendFloat32Value
- case reflect.Float64:
- if pointer {
- return sizeFloat64Ptr, appendFloat64Ptr
- }
- if slice {
- if packed {
- return sizeFloat64PackedSlice, appendFloat64PackedSlice
- }
- return sizeFloat64Slice, appendFloat64Slice
- }
- if nozero {
- return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
- }
- return sizeFloat64Value, appendFloat64Value
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return sizeStringPtr, appendUTF8StringPtr
- }
- if slice {
- return sizeStringSlice, appendUTF8StringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendUTF8StringValueNoZero
- }
- return sizeStringValue, appendUTF8StringValue
- }
- if pointer {
- return sizeStringPtr, appendStringPtr
- }
- if slice {
- return sizeStringSlice, appendStringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendStringValueNoZero
- }
- return sizeStringValue, appendStringValue
- case reflect.Slice:
- if slice {
- return sizeBytesSlice, appendBytesSlice
- }
- if oneof {
- // Oneof bytes field may also have "proto3" tag.
- // We want to marshal it as a oneof field. Do this
- // check before the proto3 check.
- return sizeBytesOneof, appendBytesOneof
- }
- if proto3 {
- return sizeBytes3, appendBytes3
- }
- return sizeBytes, appendBytes
- case reflect.Struct:
- switch encoding {
- case "group":
- if slice {
- return makeGroupSliceMarshaler(getMarshalInfo(t))
- }
- return makeGroupMarshaler(getMarshalInfo(t))
- case "bytes":
- if slice {
- return makeMessageSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageMarshaler(getMarshalInfo(t))
- }
- }
- panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(v) + tagsize
- }
- return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
- }
- return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
- }
- return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
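// Illustrative sketch, not part of the deleted file: the zigzag transform the
// sizeZigzag* helpers above rely on. It folds negatives into small unsigned
// values so they stay short as varints: 0->0, -1->1, 1->2, -2->3, 2->4, ...
func sketchZigzag32(v int32) uint32 {
	return (uint32(v) << 1) ^ uint32(v>>31) // v>>31 is arithmetic: 0 or all ones
}

func sketchZigzag64(v int64) uint64 {
	return (uint64(v) << 1) ^ uint64(v>>63)
}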
-func sizeBoolValue(_ pointer, tagsize int) int {
- return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toBool()
- if !v {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
- p := *ptr.toBoolPtr()
- if p == nil {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return 0
- }
- return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- if v == "" {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
- p := *ptr.toStringPtr()
- if p == nil {
- return 0
- }
- v := *p
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
- s := *ptr.toStringSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if v == nil {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBytesSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24))
- return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24),
- byte(v>>32),
- byte(v>>40),
- byte(v>>48),
- byte(v>>56))
- return b
-}
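// Illustrative sketch, not part of the deleted file: fixed32/fixed64 payloads
// are little-endian, exactly as appendFixed32/appendFixed64 spell out above
// (encoding/binary's LittleEndian.PutUint64 expresses the same layout).
func sketchPutFixed64(v uint64) []byte {
	var buf [8]byte
	for i := uint(0); i < 8; i++ {
		buf[i] = byte(v >> (8 * i)) // least-significant byte first
	}
	return buf[:]
}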
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
- // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
- // have non-leaf inliner.
- switch {
- case v < 1<<7:
- b = append(b, byte(v))
- case v < 1<<14:
- b = append(b,
- byte(v&0x7f|0x80),
- byte(v>>7))
- case v < 1<<21:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte(v>>14))
- case v < 1<<28:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte(v>>21))
- case v < 1<<35:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte(v>>28))
- case v < 1<<42:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte(v>>35))
- case v < 1<<49:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte(v>>42))
- case v < 1<<56:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte(v>>49))
- case v < 1<<63:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte(v>>56))
- default:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte((v>>56)&0x7f|0x80),
- 1)
- }
- return b
-}
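// Illustrative sketch, not part of the deleted file: a compact equivalent of
// the unrolled appendVarint above. Each output byte carries 7 payload bits,
// lowest group first, with the high bit set on every byte but the last.
// Example: 300 encodes as [0xAC, 0x02].
func sketchAppendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}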
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, *p)
- return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(*p))
- return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(*p))
- return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, *p)
- return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(*p))
- return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(*p))
- return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
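// Illustrative sketch, not part of the deleted file: what the *PackedSlice
// appenders above produce. A packed repeated field is one length-delimited
// record: key (wire type 2), total payload length, then the concatenated
// element varints. For field 4 with values [3, 270]:
//   0x22        key: 4<<3 | 2
//   0x03        payload length
//   0x03        varint(3)
//   0x8E 0x02   varint(270)
// The real code sizes the payload with SizeVarint first; this sketch simply
// buffers it.
func sketchAppendPackedVarints(b []byte, fieldNum int, vals []uint64) []byte {
	appendVarint := func(b []byte, v uint64) []byte {
		for v >= 1<<7 {
			b = append(b, byte(v&0x7f|0x80))
			v >>= 7
		}
		return append(b, byte(v))
	}
	b = appendVarint(b, uint64(fieldNum)<<3|2)
	var payload []byte
	for _, v := range vals {
		payload = appendVarint(payload, v)
	}
	b = appendVarint(b, uint64(len(payload)))
	return append(b, payload...)
}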
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, *p)
- return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- if !v {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = append(b, 1)
- return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toBoolPtr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- if *p {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(len(s)))
- for _, v := range s {
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toStringSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- s := *ptr.toStringSlice()
- for _, v := range s {
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if v == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBytesSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- return u.size(p) + 2*tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- var err error
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, p, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- return b, err
- }
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- n += u.size(v) + 2*tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, v, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.size(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(p)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, p, deterministic)
- }
-}
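// Illustrative sketch, not part of the deleted file: the two delimiting schemes
// handled by makeGroupMarshaler and makeMessageMarshaler above. A group is
// bracketed by start/end keys (wire types 3 and 4) and needs no length, while
// an embedded message is length-prefixed (wire type 2), which is why its size
// must be known up front and is reused from the cache (u.cachedsize):
//   group:    key(field, 3)  <fields...>  key(field, 4)
//   message:  key(field, 2)  varint(len)  <fields...>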
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
- // figure out key and value type
- t := f.Type
- keyType := t.Key()
- valType := t.Elem()
- keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
- valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
- keyWireTag := 1<<3 | wiretype(keyTags[0])
- valWireTag := 2<<3 | wiretype(valTags[0])
-
- // We create an interface to get the addresses of the map key and value.
- // If the value is pointer-typed, the interface is a direct interface and
- // the idata itself is the value. Otherwise, the idata is the pointer to the
- // value.
- // Key cannot be pointer-typed.
- valIsPtr := valType.Kind() == reflect.Ptr
-
- // If value is a message with nested maps, calling
- // valSizer in marshal may be quadratic. We should use
- // cached version in marshal (but not in size).
- // If value is not message type, we don't have size cache,
- // but it cannot be nested either. Just use valSizer.
- valCachedSizer := valSizer
- if valIsPtr && valType.Elem().Kind() == reflect.Struct {
- u := getMarshalInfo(valType.Elem())
- valCachedSizer = func(ptr pointer, tagsize int) int {
- // Same as message sizer, but use cache.
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.cachedsize(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- }
- }
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(t).Elem() // the map
- n := 0
- for _, k := range m.MapKeys() {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(t).Elem() // the map
- var err error
- keys := m.MapKeys()
- if len(keys) > 1 && deterministic {
- sort.Sort(mapKeys(keys))
- }
-
- var nerr nonFatal
- for _, k := range keys {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- b = appendVarint(b, uint64(siz))
- b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
- return b, err
- }
- }
- return b, nerr.E
- }
-}
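// Illustrative sketch, not part of the deleted file: makeMapMarshaler above
// writes each map entry as if it were a two-field message, key as field 1 and
// value as field 2 (hence keyWireTag = 1<<3|... and valWireTag = 2<<3|...).
// For a map<string, string> field numbered 7 holding {"a": "bc"}:
//   0x3A               key: 7<<3 | 2
//   0x07               entry length (7 bytes)
//   0x0A 0x01 'a'      map key,   field 1, length-delimited
//   0x12 0x02 'b' 'c'  map value, field 2, length-delimited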
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
- // Oneof field is an interface. We need to get the actual data type on the fly.
- t := f.Type
- return func(ptr pointer, _ int) int {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return 0
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- e := fi.oneofElems[telem]
- return e.sizer(p, e.tagsize)
- },
- func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return b, nil
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
- return b, errOneofHasNil
- }
- e := fi.oneofElems[telem]
- return e.marshaler(b, p, e.wiretag, deterministic)
- }
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- mu.Unlock()
- return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- // Not sure this is required, but the old code does it.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// message set format is:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
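// Illustrative sketch, not part of the deleted file: the byte-level shape of
// one MessageSet item, matching the constants used by appendMessageSet below
// (WireVarint=0, WireBytes=2, WireStartGroup=3, WireEndGroup=4):
//   0x0B                    1<<3 | 3   start of the Item group
//   0x10 varint(type_id)    2<<3 | 0   the extension's field number
//   0x1A varint(len) <msg>  3<<3 | 2   the serialized extension message
//   0x0C                    1<<3 | 4   end of the Item group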
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for id, e := range m {
- n += 2 // start group, end group. tag = 1 (size=1)
- n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- siz := len(msgWithLen)
- n += siz + 1 // message, tag = 3 (size=1)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, 1) // message, tag = 3 (size=1)
- }
- mu.Unlock()
- return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for id, e := range m {
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b = append(b, 1<<3|WireEndGroup)
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, id := range keys {
- e := m[int32(id)]
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- b = append(b, 1<<3|WireEndGroup)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
- if m == nil {
- return 0
- }
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
- if m == nil {
- return b, nil
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- var err error
- var nerr nonFatal
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
- XXX_Size() int
- XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
- if m, ok := pb.(newMarshaler); ok {
- return m.XXX_Size()
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, _ := m.Marshal()
- return len(b)
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return 0
- }
- var info InternalMessageInfo
- return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- b := make([]byte, 0, siz)
- return m.XXX_Marshal(b, false)
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- return m.Marshal()
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return nil, ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- b := make([]byte, 0, siz)
- return info.Marshal(b, pb, false)
-}
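// Illustrative usage sketch, not from this repository: pb.Example stands for an
// arbitrary protoc-gen-go generated type; only Size and Marshal are the entry
// points defined above.
//
//	msg := &pb.Example{Name: "demo"}
//	n := proto.Size(msg)             // generated fast path via XXX_Size
//	data, err := proto.Marshal(msg)  // len(data) == n on success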
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
- var err error
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
- return err
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, err := m.Marshal()
- p.buf = append(p.buf, b...)
- return err
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
- return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
- need := len(p.buf) + n
- if need <= cap(p.buf) {
- return
- }
- newCap := len(p.buf) * 2
- if newCap < need {
- newCap = need
- }
- p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
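// Illustrative sketch, not part of the deleted file: grow's policy is "double,
// or jump straight to the needed size", giving amortized O(1) copying per
// appended byte. E.g. len=100, cap=128, n=50: need=150 > 128, so the new
// capacity is max(200, 150) = 200.
func sketchGrownCap(length, capacity, n int) int {
	need := length + n
	if need <= capacity {
		return capacity // no reallocation needed
	}
	newCap := 2 * length
	if newCap < need {
		newCap = need
	}
	return newCap
}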
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
deleted file mode 100644
index 5525def6a..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src are of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
- mi := atomicLoadMergeInfo(&a.merge)
- if mi == nil {
- mi = getMergeInfo(reflect.TypeOf(dst).Elem())
- atomicStoreMergeInfo(&a.merge, mi)
- }
- mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []mergeFieldInfo
- unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
-
- // isPointer reports whether the value in the field is a pointer.
- // This is true for the following situations:
- // * Pointer to struct
- // * Pointer to basic type (proto2 only)
- // * Slice (first value in slice header is a pointer)
- // * String (first value in string header is a pointer)
- isPointer bool
-
- // basicWidth reports the width of the field assuming that it is directly
- // embedded in the struct (as is the case for basic types in proto3).
- // The possible values are:
- // 0: invalid
- // 1: bool
- // 4: int32, uint32, float32
- // 8: int64, uint64, float64
- basicWidth int
-
- // Where dst and src are pointers to the types being merged.
- merge func(dst, src pointer)
-}
-
-var (
- mergeInfoMap = map[reflect.Type]*mergeInfo{}
- mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
- mergeInfoLock.Lock()
- defer mergeInfoLock.Unlock()
- mi := mergeInfoMap[t]
- if mi == nil {
- mi = &mergeInfo{typ: t}
- mergeInfoMap[t] = mi
- }
- return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
- if dst.isNil() {
- panic("proto: nil destination")
- }
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&mi.initialized) == 0 {
- mi.computeMergeInfo()
- }
-
- for _, fi := range mi.fields {
- sfp := src.offset(fi.field)
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
- continue
- }
- if fi.basicWidth > 0 {
- switch {
- case fi.basicWidth == 1 && !*sfp.toBool():
- continue
- case fi.basicWidth == 4 && *sfp.toUint32() == 0:
- continue
- case fi.basicWidth == 8 && *sfp.toUint64() == 0:
- continue
- }
- }
- }
-
- dfp := dst.offset(fi.field)
- fi.merge(dfp, sfp)
- }
-
- // TODO: Make this faster?
- out := dst.asPointerTo(mi.typ).Elem()
- in := src.asPointerTo(mi.typ).Elem()
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- if mi.unrecognized.IsValid() {
- if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
- *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
- }
- }
-}
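// Illustrative sketch, not part of the deleted file: the per-kind behaviour the
// reflection machinery below assembles, written for plain values. Scalars only
// overwrite the destination when the source is non-zero; repeated fields are
// concatenated.
func sketchMergeInt64(dst *int64, src int64) {
	if src != 0 {
		*dst = src
	}
}

func sketchMergeInt64Slice(dst *[]int64, src []int64) {
	if src != nil {
		*dst = append(*dst, src...)
		if *dst == nil {
			*dst = []int64{} // keep "present but empty" distinct from unset
		}
	}
}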
-
-func (mi *mergeInfo) computeMergeInfo() {
- mi.lock.Lock()
- defer mi.lock.Unlock()
- if mi.initialized != 0 {
- return
- }
- t := mi.typ
- n := t.NumField()
-
- props := GetProperties(t)
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- mfi := mergeFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- switch tf.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.String:
- // As a special case, we assume slices and strings are pointers
- // since we know that the first field in the SliceHeader or
- // StringHeader is a data pointer.
- mfi.isPointer = true
- case reflect.Bool:
- mfi.basicWidth = 1
- case reflect.Int32, reflect.Uint32, reflect.Float32:
- mfi.basicWidth = 4
- case reflect.Int64, reflect.Uint64, reflect.Float64:
- mfi.basicWidth = 8
- }
- }
-
- // Unwrap tf to get at its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + tf.Name())
- }
-
- switch tf.Kind() {
- case reflect.Int32:
- switch {
- case isSlice: // E.g., []int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
- /*
- sfsp := src.toInt32Slice()
- if *sfsp != nil {
- dfsp := dst.toInt32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- */
- sfs := src.getInt32Slice()
- if sfs != nil {
- dfs := dst.getInt32Slice()
- dfs = append(dfs, sfs...)
- if dfs == nil {
- dfs = []int32{}
- }
- dst.setInt32Slice(dfs)
- }
- }
- case isPointer: // E.g., *int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
- /*
- sfpp := src.toInt32Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt32Ptr()
- if *dfpp == nil {
- *dfpp = Int32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- */
- sfp := src.getInt32Ptr()
- if sfp != nil {
- dfp := dst.getInt32Ptr()
- if dfp == nil {
- dst.setInt32Ptr(*sfp)
- } else {
- *dfp = *sfp
- }
- }
- }
- default: // E.g., int32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt32(); v != 0 {
- *dst.toInt32() = v
- }
- }
- }
- case reflect.Int64:
- switch {
- case isSlice: // E.g., []int64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toInt64Slice()
- if *sfsp != nil {
- dfsp := dst.toInt64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- }
- case isPointer: // E.g., *int64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toInt64Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt64Ptr()
- if *dfpp == nil {
- *dfpp = Int64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., int64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt64(); v != 0 {
- *dst.toInt64() = v
- }
- }
- }
- case reflect.Uint32:
- switch {
- case isSlice: // E.g., []uint32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint32Slice()
- if *sfsp != nil {
- dfsp := dst.toUint32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint32{}
- }
- }
- }
- case isPointer: // E.g., *uint32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint32Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint32Ptr()
- if *dfpp == nil {
- *dfpp = Uint32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint32(); v != 0 {
- *dst.toUint32() = v
- }
- }
- }
- case reflect.Uint64:
- switch {
- case isSlice: // E.g., []uint64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint64Slice()
- if *sfsp != nil {
- dfsp := dst.toUint64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint64{}
- }
- }
- }
- case isPointer: // E.g., *uint64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint64Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint64Ptr()
- if *dfpp == nil {
- *dfpp = Uint64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint64(); v != 0 {
- *dst.toUint64() = v
- }
- }
- }
- case reflect.Float32:
- switch {
- case isSlice: // E.g., []float32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat32Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float32{}
- }
- }
- }
- case isPointer: // E.g., *float32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat32Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat32Ptr()
- if *dfpp == nil {
- *dfpp = Float32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat32(); v != 0 {
- *dst.toFloat32() = v
- }
- }
- }
- case reflect.Float64:
- switch {
- case isSlice: // E.g., []float64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat64Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float64{}
- }
- }
- }
- case isPointer: // E.g., *float64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat64Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat64Ptr()
- if *dfpp == nil {
- *dfpp = Float64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat64(); v != 0 {
- *dst.toFloat64() = v
- }
- }
- }
- case reflect.Bool:
- switch {
- case isSlice: // E.g., []bool
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toBoolSlice()
- if *sfsp != nil {
- dfsp := dst.toBoolSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []bool{}
- }
- }
- }
- case isPointer: // E.g., *bool
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toBoolPtr()
- if *sfpp != nil {
- dfpp := dst.toBoolPtr()
- if *dfpp == nil {
- *dfpp = Bool(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., bool
- mfi.merge = func(dst, src pointer) {
- if v := *src.toBool(); v {
- *dst.toBool() = v
- }
- }
- }
- case reflect.String:
- switch {
- case isSlice: // E.g., []string
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toStringSlice()
- if *sfsp != nil {
- dfsp := dst.toStringSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []string{}
- }
- }
- }
- case isPointer: // E.g., *string
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toStringPtr()
- if *sfpp != nil {
- dfpp := dst.toStringPtr()
- if *dfpp == nil {
- *dfpp = String(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., string
- mfi.merge = func(dst, src pointer) {
- if v := *src.toString(); v != "" {
- *dst.toString() = v
- }
- }
- }
- case reflect.Slice:
- isProto3 := props.Prop[i].proto3
- switch {
- case isPointer:
- panic("bad pointer in byte slice case in " + tf.Name())
- case tf.Elem().Kind() != reflect.Uint8:
- panic("bad element kind in byte slice case in " + tf.Name())
- case isSlice: // E.g., [][]byte
- mfi.merge = func(dst, src pointer) {
- sbsp := src.toBytesSlice()
- if *sbsp != nil {
- dbsp := dst.toBytesSlice()
- for _, sb := range *sbsp {
- if sb == nil {
- *dbsp = append(*dbsp, nil)
- } else {
- *dbsp = append(*dbsp, append([]byte{}, sb...))
- }
- }
- if *dbsp == nil {
- *dbsp = [][]byte{}
- }
- }
- }
- default: // E.g., []byte
- mfi.merge = func(dst, src pointer) {
- sbp := src.toBytes()
- if *sbp != nil {
- dbp := dst.toBytes()
- if !isProto3 || len(*sbp) > 0 {
- *dbp = append([]byte{}, *sbp...)
- }
- }
- }
- }
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("message field %s without pointer", tf))
- case isSlice: // E.g., []*pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sps := src.getPointerSlice()
- if sps != nil {
- dps := dst.getPointerSlice()
- for _, sp := range sps {
- var dp pointer
- if !sp.isNil() {
- dp = valToPointer(reflect.New(tf))
- mi.merge(dp, sp)
- }
- dps = append(dps, dp)
- }
- if dps == nil {
- dps = []pointer{}
- }
- dst.setPointerSlice(dps)
- }
- }
- default: // E.g., *pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- dp := dst.getPointer()
- if dp.isNil() {
- dp = valToPointer(reflect.New(tf))
- dst.setPointer(dp)
- }
- mi.merge(dp, sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in map case in " + tf.Name())
- default: // E.g., map[K]V
- mfi.merge = func(dst, src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- dm := dst.asPointerTo(tf).Elem()
- if dm.IsNil() {
- dm.Set(reflect.MakeMap(tf))
- }
-
- switch tf.Elem().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(Clone(val.Interface().(Message)))
- dm.SetMapIndex(key, val)
- }
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- dm.SetMapIndex(key, val)
- }
- default: // Basic type (e.g., string)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- dm.SetMapIndex(key, val)
- }
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in interface case in " + tf.Name())
- default: // E.g., interface{}
- // TODO: Make this faster?
- mfi.merge = func(dst, src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- du := dst.asPointerTo(tf).Elem()
- typ := su.Elem().Type()
- if du.IsNil() || du.Elem().Type() != typ {
- du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
- }
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- dv := du.Elem().Elem().Field(0)
- if dv.Kind() == reflect.Ptr && dv.IsNil() {
- dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- Merge(dv.Interface().(Message), sv.Interface().(Message))
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
- default: // Basic type (e.g., string)
- dv.Set(sv)
- }
- }
- }
- }
- default:
- panic(fmt.Sprintf("merger not found for type:%s", tf))
- }
- mi.fields = append(mi.fields, mfi)
- }
-
- mi.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- mi.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&mi.initialized, 1)
-}
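The table_merge.go removed above builds a merge table per message type the first time that type is merged: an atomic flag guards the fast path and a mutex guards the one-time computeMergeInfo pass. As a rough illustration of that lazy-initialization pattern only, here is a minimal, self-contained Go sketch; typeInfo, compute, and its field contents are hypothetical stand-ins, not part of the vendored package.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// typeInfo mimics mergeInfo's initialization discipline: readers take a
// single atomic load on the hot path, and the expensive per-type analysis
// runs at most once under the mutex.
type typeInfo struct {
	initialized int32      // 0 = not ready, 1 = ready (read atomically)
	lock        sync.Mutex // prevents double initialization
	fields      []string   // stand-in for the computed per-field merge plan
}

func (ti *typeInfo) get() []string {
	if atomic.LoadInt32(&ti.initialized) == 0 {
		ti.compute()
	}
	return ti.fields
}

func (ti *typeInfo) compute() {
	ti.lock.Lock()
	defer ti.lock.Unlock()
	if ti.initialized != 0 {
		return // another goroutine finished the work first
	}
	ti.fields = []string{"a", "b"} // placeholder for the real field analysis
	atomic.StoreInt32(&ti.initialized, 1)
}

func main() {
	var ti typeInfo
	fmt.Println(ti.get()) // [a b]
}

sync.Once would be the more common idiom for this; the sketch keeps the explicit atomic/mutex pairing only because that is what the deleted code does.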
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index acee2fc52..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2053 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
- // Load the unmarshal information for this message type.
- // The atomic load ensures memory consistency.
- u := atomicLoadUnmarshalInfo(&a.unmarshal)
- if u == nil {
- // Slow path: find unmarshal info for msg, update a with it.
- u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
- atomicStoreUnmarshalInfo(&a.unmarshal, u)
- }
- // Then do the unmarshaling.
- err := u.unmarshal(toPointer(&msg), b)
- return err
-}
-
-type unmarshalInfo struct {
- typ reflect.Type // type of the protobuf struct
-
- // 0 = only typ field is initialized
- // 1 = completely initialized
- initialized int32
- lock sync.Mutex // prevents double initialization
- dense []unmarshalFieldInfo // fields indexed by tag #
- sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
- reqFields []string // names of required fields
- reqMask uint64 // 1<<len(reqFields)-1
- unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
- extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
- oldExtensions field // offset of old-form extensions field (of type map[int32]Extension)
- extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
- isMessageSet bool // if true, implies extensions field is valid
-}
-
-// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
-// It decodes the field, stores it at f, and returns the unused bytes.
-// w is the wire encoding.
-// b is the data after the tag and wire encoding have been read.
-type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
-
-type unmarshalFieldInfo struct {
- // location of the field in the proto message structure.
- field field
-
- // function to unmarshal the data for the field.
- unmarshal unmarshaler
-
- // if a required field, contains a single set bit at this field's index in the required field list.
- reqMask uint64
-
- name string // name of the field, for error reporting
-}
-
-var (
- unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
- unmarshalInfoLock sync.Mutex
-)
-
-// getUnmarshalInfo returns the data structure which can be
-// subsequently used to unmarshal a message of the given type.
-// t is the type of the message (note: not pointer to message).
-func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
- // It would be correct to return a new unmarshalInfo
- // unconditionally. We would end up allocating one
- // per occurrence of that type as a message or submessage.
- // We use a cache here just to reduce memory usage.
- unmarshalInfoLock.Lock()
- defer unmarshalInfoLock.Unlock()
- u := unmarshalInfoMap[t]
- if u == nil {
- u = &unmarshalInfo{typ: t}
- // Note: we just set the type here. The rest of the fields
- // will be initialized on first use.
- unmarshalInfoMap[t] = u
- }
- return u
-}
-
-// unmarshal does the main work of unmarshaling a message.
-// u provides type information used to unmarshal the message.
-// m is a pointer to a protocol buffer message.
-// b is a byte stream to unmarshal into m.
-// This is the top routine used when recursively unmarshaling submessages.
-func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeUnmarshalInfo()
- }
- if u.isMessageSet {
- return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
- }
- var reqMask uint64 // bitmask of required fields we've seen.
- var errLater error
- for len(b) > 0 {
- // Read tag and wire type.
- // Special case 1 and 2 byte varints.
- var x uint64
- if b[0] < 128 {
- x = uint64(b[0])
- b = b[1:]
- } else if len(b) >= 2 && b[1] < 128 {
- x = uint64(b[0]&0x7f) + uint64(b[1])<<7
- b = b[2:]
- } else {
- var n int
- x, n = decodeVarint(b)
- if n == 0 {
- return io.ErrUnexpectedEOF
- }
- b = b[n:]
- }
- tag := x >> 3
- wire := int(x) & 7
-
- // Dispatch on the tag to one of the unmarshal* functions below.
- var f unmarshalFieldInfo
- if tag < uint64(len(u.dense)) {
- f = u.dense[tag]
- } else {
- f = u.sparse[tag]
- }
- if fn := f.unmarshal; fn != nil {
- var err error
- b, err = fn(b, m.offset(f.field), wire)
- if err == nil {
- reqMask |= f.reqMask
- continue
- }
- if r, ok := err.(*RequiredNotSetError); ok {
- // Remember this error, but keep parsing. We need to produce
- // a full parse even if a required field is missing.
- if errLater == nil {
- errLater = r
- }
- reqMask |= f.reqMask
- continue
- }
- if err != errInternalBadWireType {
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return err
- }
- // Fragments with bad wire type are treated as unknown fields.
- }
-
- // Unknown tag.
- if !u.unrecognized.IsValid() {
- // Don't keep unrecognized data; just skip it.
- var err error
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- continue
- }
- // Keep unrecognized data around.
- // maybe in extensions, maybe in the unrecognized field.
- z := m.offset(u.unrecognized).toBytes()
- var emap map[int32]Extension
- var e Extension
- for _, r := range u.extensionRanges {
- if uint64(r.Start) <= tag && tag <= uint64(r.End) {
- if u.extensions.IsValid() {
- mp := m.offset(u.extensions).toExtensions()
- emap = mp.extensionsWrite()
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.oldExtensions.IsValid() {
- p := m.offset(u.oldExtensions).toOldExtensions()
- emap = *p
- if emap == nil {
- emap = map[int32]Extension{}
- *p = emap
- }
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- panic("no extensions field available")
- }
- }
-
- // Use wire type to skip data.
- var err error
- b0 := b
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- *z = encodeVarint(*z, tag<<3|uint64(wire))
- *z = append(*z, b0[:len(b0)-len(b)]...)
-
- if emap != nil {
- emap[int32(tag)] = e
- }
- }
- if reqMask != u.reqMask && errLater == nil {
- // A required field of this message is missing.
- for _, n := range u.reqFields {
- if reqMask&1 == 0 {
- errLater = &RequiredNotSetError{n}
- }
- reqMask >>= 1
- }
- }
- return errLater
-}
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
- u.lock.Lock()
- defer u.lock.Unlock()
- if u.initialized != 0 {
- return
- }
- t := u.typ
- n := t.NumField()
-
- // Set up the "not found" value for the unrecognized byte buffer.
- // This is the default for proto3.
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.oldExtensions = invalidField
-
- // List of the generated type and offset for each oneof field.
- type oneofField struct {
- ityp reflect.Type // interface type of oneof field
- field field // offset in containing message
- }
- var oneofFields []oneofField
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.Name == "XXX_unrecognized" {
- // The byte slice used to hold unrecognized input is special.
- if f.Type != reflect.TypeOf(([]byte)(nil)) {
- panic("bad type for XXX_unrecognized field: " + f.Type.Name())
- }
- u.unrecognized = toField(&f)
- continue
- }
- if f.Name == "XXX_InternalExtensions" {
- // Ditto here.
- if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
- panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
- }
- u.extensions = toField(&f)
- if f.Tag.Get("protobuf_messageset") == "1" {
- u.isMessageSet = true
- }
- continue
- }
- if f.Name == "XXX_extensions" {
- // An older form of the extensions field.
- if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
- panic("bad type for XXX_extensions field: " + f.Type.Name())
- }
- u.oldExtensions = toField(&f)
- continue
- }
- if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
- continue
- }
-
- oneof := f.Tag.Get("protobuf_oneof")
- if oneof != "" {
- oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
- // The rest of oneof processing happens below.
- continue
- }
-
- tags := f.Tag.Get("protobuf")
- tagArray := strings.Split(tags, ",")
- if len(tagArray) < 2 {
- panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
- }
- tag, err := strconv.Atoi(tagArray[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tagArray[1])
- }
-
- name := ""
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- }
-
- // Extract unmarshaling function from the field (its type and tags).
- unmarshal := fieldUnmarshaler(&f)
-
- // Required field?
- var reqMask uint64
- if tagArray[2] == "req" {
- bit := len(u.reqFields)
- u.reqFields = append(u.reqFields, name)
- reqMask = uint64(1) << uint(bit)
- // TODO: if we have more than 64 required fields, we end up
- // not verifying that all required fields are present.
- // Fix this, perhaps using a count of required fields?
- }
-
- // Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask, name)
- }
-
- // Find any types associated with oneof fields.
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
- for _, v := range oneofImplementers {
- tptr := reflect.TypeOf(v) // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
- }
-
- }
-
- // Get extension ranges, if any.
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
- if fn.IsValid() {
- if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
- panic("a message with extensions, but no extensions field in " + t.Name())
- }
- u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
- }
-
- // Explicitly disallow tag 0. This will ensure we flag an error
- // when decoding a buffer of all zeros. Without this code, we
- // would decode and skip an all-zero buffer of even length.
- // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
- u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
- return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0, "")
-
- // Set mask for required field check.
- u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
- i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
- n := u.typ.NumField()
- if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
- for len(u.dense) <= tag {
- u.dense = append(u.dense, unmarshalFieldInfo{})
- }
- u.dense[tag] = i
- return
- }
- if u.sparse == nil {
- u.sparse = map[uint64]unmarshalFieldInfo{}
- }
- u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
- if f.Type.Kind() == reflect.Map {
- return makeUnmarshalMap(f)
- }
- return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
- tagArray := strings.Split(tags, ",")
- encoding := tagArray[0]
- name := "unknown"
- proto3 := false
- validateUTF8 := true
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- if tag == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- // Figure out packaging (pointer, slice, or both)
- slice := false
- pointer := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + t.Name())
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return unmarshalBoolPtr
- }
- if slice {
- return unmarshalBoolSlice
- }
- return unmarshalBoolValue
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixedS32Ptr
- }
- if slice {
- return unmarshalFixedS32Slice
- }
- return unmarshalFixedS32Value
- case "varint":
- // this could be int32 or enum
- if pointer {
- return unmarshalInt32Ptr
- }
- if slice {
- return unmarshalInt32Slice
- }
- return unmarshalInt32Value
- case "zigzag32":
- if pointer {
- return unmarshalSint32Ptr
- }
- if slice {
- return unmarshalSint32Slice
- }
- return unmarshalSint32Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixedS64Ptr
- }
- if slice {
- return unmarshalFixedS64Slice
- }
- return unmarshalFixedS64Value
- case "varint":
- if pointer {
- return unmarshalInt64Ptr
- }
- if slice {
- return unmarshalInt64Slice
- }
- return unmarshalInt64Value
- case "zigzag64":
- if pointer {
- return unmarshalSint64Ptr
- }
- if slice {
- return unmarshalSint64Slice
- }
- return unmarshalSint64Value
- }
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixed32Ptr
- }
- if slice {
- return unmarshalFixed32Slice
- }
- return unmarshalFixed32Value
- case "varint":
- if pointer {
- return unmarshalUint32Ptr
- }
- if slice {
- return unmarshalUint32Slice
- }
- return unmarshalUint32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixed64Ptr
- }
- if slice {
- return unmarshalFixed64Slice
- }
- return unmarshalFixed64Value
- case "varint":
- if pointer {
- return unmarshalUint64Ptr
- }
- if slice {
- return unmarshalUint64Slice
- }
- return unmarshalUint64Value
- }
- case reflect.Float32:
- if pointer {
- return unmarshalFloat32Ptr
- }
- if slice {
- return unmarshalFloat32Slice
- }
- return unmarshalFloat32Value
- case reflect.Float64:
- if pointer {
- return unmarshalFloat64Ptr
- }
- if slice {
- return unmarshalFloat64Slice
- }
- return unmarshalFloat64Value
- case reflect.Map:
- panic("map type in typeUnmarshaler in " + t.Name())
- case reflect.Slice:
- if pointer {
- panic("bad pointer in slice case in " + t.Name())
- }
- if slice {
- return unmarshalBytesSlice
- }
- return unmarshalBytesValue
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return unmarshalUTF8StringPtr
- }
- if slice {
- return unmarshalUTF8StringSlice
- }
- return unmarshalUTF8StringValue
- }
- if pointer {
- return unmarshalStringPtr
- }
- if slice {
- return unmarshalStringSlice
- }
- return unmarshalStringValue
- case reflect.Struct:
- // message or group field
- if !pointer {
- panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
- }
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
- case "group":
- if slice {
- return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
- }
- }
- panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64() = v
- return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32() = v
- return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64() = v
- return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64() = v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32() = v
- return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- *f.toInt32() = v
- return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.setInt32Ptr(v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- // Note: any length varint is allowed, even though any sane
- // encoder will use one byte.
- // See https://github.com/golang/protobuf/issues/76
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- // TODO: check if x>1? Tests seem to indicate no.
- v := x != 0
- *f.toBool() = v
- return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- *f.toBoolPtr() = &v
- return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- b = b[n:]
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64() = v
- return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32() = v
- return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // The use of append here is a trick which avoids the zeroing
- // that would be required if we used a make/copy pair.
- // We append to emptyBuf instead of nil because we want
- // a non-nil result even when the length is 0.
- v := append(emptyBuf[:], b[:x]...)
- *f.toBytes() = v
- return b[x:], nil
-}
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := append(emptyBuf[:], b[:x]...)
- s := f.toBytesSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[y:], err
- }
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[y:], err
- }
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
- t := f.Type
- kt := t.Key()
- vt := t.Elem()
- unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
- unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // The map entry is a submessage. Figure out how big it is.
- if w != WireBytes {
- return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- r := b[x:] // unused data to return
- b = b[:x] // data for map entry
-
- // Note: we could use #keys * #values ~= 200 functions
- // to do map decoding without reflection. Probably not worth it.
- // Maps will be somewhat slow. Oh well.
-
- // Read key and value from data.
- var nerr nonFatal
- k := reflect.New(kt)
- v := reflect.New(vt)
- for len(b) > 0 {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- wire := int(x) & 7
- b = b[n:]
-
- var err error
- switch x >> 3 {
- case 1:
- b, err = unmarshalKey(b, valToPointer(k), wire)
- case 2:
- b, err = unmarshalVal(b, valToPointer(v), wire)
- default:
- err = errInternalBadWireType // skip unknown tag
- }
-
- if nerr.Merge(err) {
- continue
- }
- if err != errInternalBadWireType {
- return nil, err
- }
-
- // Skip past unknown fields.
- b, err = skipField(b, wire)
- if err != nil {
- return nil, err
- }
- }
-
- // Get map, allocate if needed.
- m := f.asPointerTo(t).Elem() // an addressable map[K]T
- if m.IsNil() {
- m.Set(reflect.MakeMap(t))
- }
-
- // Insert into map.
- m.SetMapIndex(k.Elem(), v.Elem())
-
- return r, nerr.E
- }
-}
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-// oneof F {
-// int64 X = 1;
-// float64 Y = 2;
-// }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
- sf := typ.Field(0)
- field0 := toField(&sf)
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // Allocate holder for value.
- v := reflect.New(typ)
-
- // Unmarshal data into holder.
- // We unmarshal into the first field of the holder object.
- var err error
- var nerr nonFatal
- b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if !nerr.Merge(err) {
- return nil, err
- }
-
- // Write pointer to holder into target field.
- f.asPointerTo(ityp).Elem().Set(v)
-
- return b, nerr.E
- }
-}
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
- switch wire {
- case WireVarint:
- _, k := decodeVarint(b)
- if k == 0 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[k:]
- case WireFixed32:
- if len(b) < 4 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[4:]
- case WireFixed64:
- if len(b) < 8 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[8:]
- case WireBytes:
- m, k := decodeVarint(b)
- if k == 0 || uint64(len(b)-k) < m {
- return b, io.ErrUnexpectedEOF
- }
- b = b[uint64(k)+m:]
- case WireStartGroup:
- _, i := findEndGroup(b)
- if i == -1 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[i:]
- default:
- return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
- }
- return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
- depth := 1
- i := 0
- for {
- x, n := decodeVarint(b[i:])
- if n == 0 {
- return -1, -1
- }
- j := i
- i += n
- switch x & 7 {
- case WireVarint:
- _, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- case WireFixed32:
- if len(b)-4 < i {
- return -1, -1
- }
- i += 4
- case WireFixed64:
- if len(b)-8 < i {
- return -1, -1
- }
- i += 8
- case WireBytes:
- m, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- if uint64(len(b)-i) < m {
- return -1, -1
- }
- i += int(m)
- case WireStartGroup:
- depth++
- case WireEndGroup:
- depth--
- if depth == 0 {
- return j, i
- }
- default:
- return -1, -1
- }
- }
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
- for x >= 1<<7 {
- b = append(b, byte(x&0x7f|0x80))
- x >>= 7
- }
- return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
- var x, y uint64
- if len(b) == 0 {
- goto bad
- }
- x = uint64(b[0])
- if x < 0x80 {
- return x, 1
- }
- x -= 0x80
-
- if len(b) <= 1 {
- goto bad
- }
- y = uint64(b[1])
- x += y << 7
- if y < 0x80 {
- return x, 2
- }
- x -= 0x80 << 7
-
- if len(b) <= 2 {
- goto bad
- }
- y = uint64(b[2])
- x += y << 14
- if y < 0x80 {
- return x, 3
- }
- x -= 0x80 << 14
-
- if len(b) <= 3 {
- goto bad
- }
- y = uint64(b[3])
- x += y << 21
- if y < 0x80 {
- return x, 4
- }
- x -= 0x80 << 21
-
- if len(b) <= 4 {
- goto bad
- }
- y = uint64(b[4])
- x += y << 28
- if y < 0x80 {
- return x, 5
- }
- x -= 0x80 << 28
-
- if len(b) <= 5 {
- goto bad
- }
- y = uint64(b[5])
- x += y << 35
- if y < 0x80 {
- return x, 6
- }
- x -= 0x80 << 35
-
- if len(b) <= 6 {
- goto bad
- }
- y = uint64(b[6])
- x += y << 42
- if y < 0x80 {
- return x, 7
- }
- x -= 0x80 << 42
-
- if len(b) <= 7 {
- goto bad
- }
- y = uint64(b[7])
- x += y << 49
- if y < 0x80 {
- return x, 8
- }
- x -= 0x80 << 49
-
- if len(b) <= 8 {
- goto bad
- }
- y = uint64(b[8])
- x += y << 56
- if y < 0x80 {
- return x, 9
- }
- x -= 0x80 << 56
-
- if len(b) <= 9 {
- goto bad
- }
- y = uint64(b[9])
- x += y << 63
- if y < 2 {
- return x, 10
- }
-
-bad:
- return 0, 0
-}
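
Note: the deleted helpers above hand-roll protobuf's base-128 varint wire encoding (decodeVarint is unrolled for speed). The following is a minimal, self-contained sketch of the same scheme for illustration only; it does not use the package's unexported helpers, and the function names are made up for the example.

package main

import "fmt"

// putUvarint appends x in base-128 varint form: 7 bits per byte,
// least-significant group first, high bit set on every byte but the last.
func putUvarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x&0x7f)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

// getUvarint decodes one varint, returning the value and the number of
// bytes consumed (0 means the input was truncated). Overflow checking of
// the 10th byte is omitted to keep the sketch short.
func getUvarint(b []byte) (uint64, int) {
	var x uint64
	var shift uint
	for i, c := range b {
		if c < 0x80 {
			return x | uint64(c)<<shift, i + 1
		}
		x |= uint64(c&0x7f) << shift
		shift += 7
	}
	return 0, 0
}

func main() {
	enc := putUvarint(nil, 300) // 300 encodes as ac 02
	fmt.Printf("%% x -> % x\n", enc)
	v, n := getUvarint(enc)
	fmt.Println(v, n) // 300 2
}
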
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 1aaee725b..000000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,843 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if name == "XXX_NoUnkeyedLiteral" {
- continue
- }
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("<nil>\n")); err != nil {
- return err
- }
- continue
- }
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.MapValProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
-
- // Enums have a String method, so writeAny will work fine.
- if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv.Addr()
- if _, err := extendable(pv.Interface()); err == nil {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if v.CanAddr() {
- // Calling v.Interface on a struct causes the reflect package to
- // copy the entire struct. This is racy with the new Marshaler
- // since we atomically update the XXX_sizecache.
- //
- // Thus, we retrieve a pointer to the struct if possible to avoid
- // a race since v.Interface on the pointer doesn't copy the struct.
- //
- // If v is not addressable, then we are not worried about a race
- // since it implies that the binary Marshaler cannot possibly be
- // mutating this value.
- v = v.Addr()
- }
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, err := fmt.Fprintf(w, "/* %v */\n", err)
- return err
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, err := w.Write(endBraceNewline); err != nil {
- return err
- }
- continue
- }
- if _, err := fmt.Fprint(w, tag); err != nil {
- return err
- }
- if wire != WireStartGroup {
- if err := w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err = w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- ep, _ := extendable(pv.Interface())
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
- m, mu := ep.extensionsRead()
- if m == nil {
- return nil
- }
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(ep, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte("<nil>"))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
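
Note: the file removed above supplied the reflection-based text marshaler; its exported entry points (TextMarshaler, MarshalText, MarshalTextString, CompactText, CompactTextString) are re-implemented by text_encode.go later in this diff, so callers keep working unchanged. A hedged usage sketch follows; the examplepb.Greeting message, its fields, and its import path are hypothetical stand-ins for any generated type.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	// Assume Greeting declares optional string name = 1 and optional int32 count = 2.
	msg := &examplepb.Greeting{Name: proto.String("world"), Count: proto.Int32(3)}

	// Multi-line, indented text form.
	fmt.Print(proto.MarshalTextString(msg))

	// One-line form produced by the compact marshaler.
	fmt.Println(proto.CompactTextString(msg))

	// A custom marshaler that also expands google.protobuf.Any fields.
	tm := proto.TextMarshaler{ExpandAny: true}
	fmt.Print(tm.Text(msg))
}
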
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 000000000..4a5931009
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+		// two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+		// two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
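
Note: the new text_decode.go rebuilds UnmarshalText on top of the protoreflect API while keeping the old behavior (and the v1 text parser) behind wrapTextUnmarshalV2. A hedged round-trip sketch follows; as before, examplepb.Greeting and its import path are hypothetical stand-ins for any generated type.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	// Assume Greeting declares optional string name = 1 and optional int32 count = 2.
	const in = `name: "world" count: 3`

	msg := &examplepb.Greeting{}
	if err := proto.UnmarshalText(in, msg); err != nil {
		// On failure, a *proto.ParseError reports the line and byte offset.
		log.Fatal(err)
	}
	fmt.Println(msg.GetName(), msg.GetCount())
}
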
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 000000000..a31134eeb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte("<nil>"), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value in m can't be unmarshaled (e.g. because
+// the required messages are not linked in).
+//
+// It returns (true, error) when m was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index bb55a3af2..000000000
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(i), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- field := sv.Field(oop.Field)
- if !field.IsNil() {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
- }
- field.Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.MapKeyProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.MapValProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
-
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- return um.UnmarshalText([]byte(s))
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- return newTextParser(s).readStruct(v.Elem(), "")
-}
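
The hand-rolled reflection parser deleted above has no direct replacement in this file, but the package still exports UnmarshalText (presumably backed by the new v2 text decoder elsewhere in this rewrite), so existing callers keep compiling. A small sketch, again using the vendored anypb.Any only as a convenient concrete message:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        anypb "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        // Parse a text-format message into a concrete type.
        var m anypb.Any
        in := `type_url: "type.googleapis.com/google.protobuf.Empty"`
        if err := proto.UnmarshalText(in, &m); err != nil {
            log.Fatal(err)
        }
        fmt.Println(m.TypeUrl)
    }
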
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 000000000..d7c28da5a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
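
A hedged sketch of the wire helpers defined above in use (standalone program, not part of the patch); anypb.Any stands in for any generated message:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        anypb "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        // Round-trip a message through the wire format; Size reports the
        // encoded length up front.
        in := &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Empty"}
        fmt.Println(proto.Size(in))

        b, err := proto.Marshal(in)
        if err != nil {
            log.Fatal(err)
        }
        out := &anypb.Any{}
        if err := proto.Unmarshal(b, out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.TypeUrl == in.TypeUrl) // true
    }
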
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 000000000..398e34859
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
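
These helpers exist because optional scalar fields on generated proto2 structs are pointers; a short sketch of the intended use (illustrative only):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // Each helper returns a pointer to a copy of its argument, which
        // avoids declaring a variable just to take its address.
        name := proto.String("example")
        replicas := proto.Int32(3)
        enabled := proto.Bool(true)
        fmt.Println(*name, *replicas, *enabled) // example 3 true
    }
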
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 70276e8f5..e729dcff1 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -1,141 +1,165 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements functions to marshal proto.Message to/from
-// google.protobuf.Any message.
-
import (
"fmt"
- "reflect"
"strings"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
)
-const googleApis = "type.googleapis.com/"
+const urlPrefix = "type.googleapis.com/"
-// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
-//
-// Note that regular type assertions should be done using the Is
-// function. AnyMessageName is provided for less common use cases like filtering a
-// sequence of Any messages based on a set of allowed message type names.
-func AnyMessageName(any *any.Any) (string, error) {
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+ name, err := anyMessageName(any)
+ return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
- slash := strings.LastIndex(any.TypeUrl, "/")
- if slash < 0 {
+ name := protoreflect.FullName(any.TypeUrl)
+ if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
- return any.TypeUrl[slash+1:], nil
+ return name, nil
}
-// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
-func MarshalAny(pb proto.Message) (*any.Any, error) {
- value, err := proto.Marshal(pb)
+// MarshalAny marshals the given message m into an anypb.Any message.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+ switch dm := m.(type) {
+ case DynamicAny:
+ m = dm.Message
+ case *DynamicAny:
+ if dm == nil {
+ return nil, proto.ErrNil
+ }
+ m = dm.Message
+ }
+ b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
- return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in a google.protobuf.Any
-// message. The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-type DynamicAny struct {
- proto.Message
+ return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
-// Empty returns a new proto.Message of the type specified in a
-// google.protobuf.Any message. It returns an error if corresponding message
-// type isn't linked in.
-func Empty(any *any.Any) (proto.Message, error) {
- aname, err := AnyMessageName(any)
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+func Empty(any *anypb.Any) (proto.Message, error) {
+ name, err := anyMessageName(any)
if err != nil {
return nil, err
}
-
- t := proto.MessageType(aname)
- if t == nil {
- return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+ if err != nil {
+ return nil, err
}
- return reflect.New(t.Elem()).Interface().(proto.Message), nil
+ return proto.MessageV1(mt.New().Interface()), nil
}
-// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
-// message and places the decoded result in pb. It returns an error if type of
-// contents of Any message does not match type of pb message.
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
//
-// pb can be a proto.Message, or a *DynamicAny.
-func UnmarshalAny(any *any.Any, pb proto.Message) error {
- if d, ok := pb.(*DynamicAny); ok {
- if d.Message == nil {
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
var err error
- d.Message, err = Empty(any)
+ dm.Message, err = Empty(any)
if err != nil {
return err
}
}
- return UnmarshalAny(any, d.Message)
+ m = dm.Message
}
- aname, err := AnyMessageName(any)
+ anyName, err := AnyMessageName(any)
if err != nil {
return err
}
-
- mname := proto.MessageName(pb)
- if aname != mname {
- return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
- return proto.Unmarshal(any.Value, pb)
+ return proto.Unmarshal(any.Value, m)
}
-// Is returns true if any value contains a given message type.
-func Is(any *any.Any, pb proto.Message) bool {
- // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
- // but it avoids scanning TypeUrl for the slash.
- if any == nil {
+// Is reports whether the Any message contains a message of the specified type.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
return false
}
- name := proto.MessageName(pb)
- prefix := len(any.TypeUrl) - len(name)
- return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return "<nil>"
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+ return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
}
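
A short sketch of the rewritten ptypes Any helpers above, packing a vendored Duration message and unpacking it via DynamicAny (illustrative only, not part of the patch):

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/ptypes"
        durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
        // Pack a Duration into an Any, then unpack it without knowing the
        // concrete type in advance by letting DynamicAny resolve it.
        a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
        if err != nil {
            log.Fatal(err)
        }
        name, _ := ptypes.AnyMessageName(a)
        fmt.Println(name) // google.protobuf.Duration

        var dyn ptypes.DynamicAny
        if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
            log.Fatal(err)
        }
        fmt.Println(dyn.Message) // the unpacked *durpb.Duration
    }
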
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 78ee52334..0ef27d33d 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,200 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/any.proto
+// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/any.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Any = anypb.Any
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-type Any struct {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
- // Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_b53526c13ae22eb4, []int{0}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
-func (*Any) XXX_WellKnownType() string { return "Any" }
-
-func (m *Any) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Any.Marshal(b, m, deterministic)
-}
-func (m *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(m, src)
-}
-func (m *Any) XXX_Size() int {
- return xxx_messageInfo_Any.Size(m)
-}
-func (m *Any) XXX_DiscardUnknown() {
- xxx_messageInfo_Any.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Any proto.InternalMessageInfo
-
-func (m *Any) GetTypeUrl() string {
- if m != nil {
- return m.TypeUrl
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+ return
}
- return ""
-}
-
-func (m *Any) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Any)(nil), "google.protobuf.Any")
-}
-
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-
-var fileDescriptor_b53526c13ae22eb4 = []byte{
- // 185 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
- 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
- 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
- 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
- 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
- 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
- 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
- 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
- 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
- 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
- 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+ file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
deleted file mode 100644
index 493294255..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ /dev/null
@@ -1,154 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "github.com/golang/protobuf/ptypes/any";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "AnyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-message Any {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- string type_url = 1;
-
- // Must be a valid serialized protocol buffer of the above specified type.
- bytes value = 2;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index c0d595da7..fb9edd5c6 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -1,35 +1,6 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
-/*
-Package ptypes contains code for interacting with well-known types.
-*/
+// Package ptypes provides functionality for interacting with well-known types.
package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 26d1ca2fb..6110ae8a4 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -1,102 +1,72 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
import (
"errors"
"fmt"
"time"
- durpb "github.com/golang/protobuf/ptypes/duration"
+ durationpb "github.com/golang/protobuf/ptypes/duration"
)
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
const (
- // Range of a durpb.Duration in seconds, as specified in
- // google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
- if d == nil {
- return errors.New("duration: nil Duration")
- }
- if d.Seconds < minSeconds || d.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", d)
- }
- if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", d)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
- }
- return nil
-}
-
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
-// represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
- if err := validateDuration(p); err != nil {
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+ if err := validateDuration(dur); err != nil {
return 0, err
}
- d := time.Duration(p.Seconds) * time.Second
- if int64(d/time.Second) != p.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ d := time.Duration(dur.Seconds) * time.Second
+ if int64(d/time.Second) != dur.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
- if p.Nanos != 0 {
- d += time.Duration(p.Nanos) * time.Nanosecond
- if (d < 0) != (p.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ if dur.Nanos != 0 {
+ d += time.Duration(dur.Nanos) * time.Nanosecond
+ if (d < 0) != (dur.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a durationpb.Duration.
+func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
- return &durpb.Duration{
- Seconds: secs,
+ return &durationpb.Duration{
+ Seconds: int64(secs),
Nanos: int32(nanos),
}
}
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+ if dur == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", dur)
+ }
+ if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", dur)
+ }
+ // Seconds and Nanos must have the same sign, unless dur.Nanos is zero.
+ if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+ }
+ return nil
+}
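For orientation while reviewing this vendoring bump, here is a minimal sketch of how the two converters in the hunk above are typically used. The `package main` wrapper, the sample duration value, and the printed output are illustrative only and are not part of the vendored code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration.
	d := 90*time.Second + 500*time.Millisecond
	pb := ptypes.DurationProto(d) // Seconds: 90, Nanos: 500000000

	// google.protobuf.Duration -> time.Duration, rejecting values that
	// validateDuration flags or that overflow time.Duration.
	back, err := ptypes.Duration(pb)
	if err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	fmt.Println(back) // 1m30.5s
}
```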
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 0d681ee21..d0079ee3e 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,161 +1,63 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/duration.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Duration = durationpb.Duration
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-type Duration struct {
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-func (m *Duration) Reset() { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (*Duration) XXX_WellKnownType() string { return "Duration" }
-
-func (m *Duration) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Duration.Unmarshal(m, b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
- return xxx_messageInfo_Duration.Size(m)
-}
-func (m *Duration) XXX_DiscardUnknown() {
- xxx_messageInfo_Duration.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *Duration) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Duration) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
-}
-
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-
-var fileDescriptor_23597b2ebd7ac6c5 = []byte{
- // 190 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
- 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
- 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
- 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
- 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
- 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
- 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
- 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
- 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
- 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
- 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
deleted file mode 100644
index 975fce41a..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
+++ /dev/null
@@ -1,117 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/duration";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-message Duration {
-
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- int64 seconds = 1;
-
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- int32 nanos = 2;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 8da0df01a..026d0d491 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -1,46 +1,18 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements operations on google.protobuf.Timestamp.
-
import (
"errors"
"fmt"
"time"
- tspb "github.com/golang/protobuf/ptypes/timestamp"
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@@ -50,44 +22,18 @@ const (
maxValidSeconds = 253402300800
)
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
-func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
@@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
-func TimestampNow() *tspb.Timestamp {
+func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
@@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
- ts := &tspb.Timestamp{
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+ ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
@@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
return ts, nil
}
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
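Similarly, a short sketch of the timestamp helpers touched above; the chosen date and the panics are illustrative, not part of the vendored file.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp; fails outside [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2020, 4, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	// google.protobuf.Timestamp -> time.Time (always in UTC), plus the RFC 3339 helper.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t)
	fmt.Println(ptypes.TimestampString(ts)) // 2020-04-01T12:00:00Z
}
```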
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 31cd846de..a76f80760 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,179 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/timestamp.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Timestamp = timestamppb.Timestamp
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-type Timestamp struct {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_292007bbfe81227e, []int{0}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
}
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
-
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Timestamp.Unmarshal(m, b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
- return xxx_messageInfo_Timestamp.Size(m)
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
- xxx_messageInfo_Timestamp.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *Timestamp) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Timestamp) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
-}
-
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-
-var fileDescriptor_292007bbfe81227e = []byte{
- // 191 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
- 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
- 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
- 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
- 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
- 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
- 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
- 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
- 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
- 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
- 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
deleted file mode 100644
index eafb3fa03..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/timestamp";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-message Timestamp {
-
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE
index 2bdc0d751..3909da410 100644
--- a/vendor/github.com/klauspost/pgzip/LICENSE
+++ b/vendor/github.com/klauspost/pgzip/LICENSE
@@ -1,4 +1,4 @@
-The MIT License (MIT)
+MIT License
Copyright (c) 2014 Klaus Post
@@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-
diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go
index cb3dc0896..257c4d299 100644
--- a/vendor/github.com/klauspost/pgzip/gzip.go
+++ b/vendor/github.com/klauspost/pgzip/gzip.go
@@ -405,7 +405,7 @@ func (z *Writer) Write(p []byte) (int, error) {
if len(z.currentBuffer) == z.blockSize {
z.compressCurrent(false)
if err := z.checkError(); err != nil {
- return len(p) - len(q) - length, err
+ return len(p) - len(q), err
}
}
z.size += length
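The pgzip hunk above only corrects the byte count returned when a block fails to compress. As context, a minimal sketch of driving the pgzip Writer; the buffer contents and the SetConcurrency values are illustrative choices, not defaults taken from this diff.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/pgzip"
)

func main() {
	var buf bytes.Buffer
	zw := pgzip.NewWriter(&buf)

	// Optional tuning: 1 MiB blocks compressed on up to 4 goroutines.
	if err := zw.SetConcurrency(1<<20, 4); err != nil {
		panic(err)
	}

	// Write returns the number of bytes consumed from p; the hunk above
	// adjusts that count on the error path.
	if _, err := zw.Write([]byte("hello, parallel gzip")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed size:", buf.Len())
}
```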
diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore
new file mode 100644
index 000000000..fa81aa93a
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/.gitignore
@@ -0,0 +1,2 @@
+.idea/
+.test/
\ No newline at end of file
diff --git a/vendor/github.com/nxadm/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml
new file mode 100644
index 000000000..95dd3bd78
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+script:
+ - go test -race -v ./...
+
+go:
+ - "1.9"
+ - "1.10"
+ - "1.11"
+ - "1.12"
+ - "1.13"
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md
new file mode 100644
index 000000000..ef1b5fbed
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/CHANGES.md
@@ -0,0 +1,46 @@
+# Version v1.4.4
+
+* Fix for a checksum problem caused by a forced tag. No changes to the code.
+
+# Version v1.4.1
+
+* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".
+
+# Version v1.4.0
+
+* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".
+
+# Version v1.3.1
+
+* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
+fixes upstream issue 93.
+
+
+# Version v1.3.0
+
+* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
+to Line struct".
+
+# Version v1.2.1
+
+* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
+code in readme".
+* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
+to comment wording".
+* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
+spurious newlines from log messages".
+
+# Version v1.2.0
+
+* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
+ problem for never return the last line if it's not followed by a newline".
+* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
+deprecated os.SEEK consts". The changes bumped the minimal supported Go
+release to 1.9.
+
+# Version v1.1.0
+
+* migration to go modules.
+* release of master branch of the dormant upstream, because it contains
+fixes and improvements not present in the tagged release.
+
diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile
new file mode 100644
index 000000000..d9633891c
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang
+
+RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
+ADD . $GOPATH/src/github.com/nxadm/tail/
+
+# expecting to fetch dependencies successfully.
+RUN go get -v github.com/nxadm/tail
+
+# expecting to run the test successfully.
+RUN go test -v github.com/nxadm/tail
+
+# expecting to install successfully
+RUN go install -v github.com/nxadm/tail
+RUN go install -v github.com/nxadm/tail/cmd/gotail
+
+RUN $GOPATH/bin/gotail -h || true
+
+ENV PATH $GOPATH/bin:$PATH
+CMD ["gotail"]
diff --git a/vendor/github.com/nxadm/tail/LICENSE b/vendor/github.com/nxadm/tail/LICENSE
new file mode 100644
index 000000000..818d802a5
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/LICENSE
@@ -0,0 +1,21 @@
+# The MIT License (MIT)
+
+# © Copyright 2015 Hewlett Packard Enterprise Development LP
+Copyright (c) 2014 ActiveState
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md
new file mode 100644
index 000000000..dbb6c1727
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/README.md
@@ -0,0 +1,36 @@
+[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail)
+
+This repo is forked from the dormant upstream repo at
+[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go
+modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is
+the oldest compiler release supported.
+
+# Go package for tail-ing files
+
+A Go package striving to emulate the features of the BSD `tail` program.
+
+```Go
+t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
+if err != nil {
+ panic(err)
+}
+
+for line := range t.Lines {
+ fmt.Println(line.Text)
+}
+```
+
+See [API documentation](http://godoc.org/github.com/nxadm/tail).
+
+## Log rotation
+
+Tail comes with full support for truncation/move detection as it is
+designed to work with log rotation tools.
+
+## Installing
+
+ go get github.com/nxadm/tail/...
+
+## Windows support
+
+This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support.
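The README's log-rotation note maps onto Config fields defined later in this diff (tail.go). A hedged variant of the README's own example that follows rotated files; the log path is illustrative.

```go
package main

import (
	"fmt"

	"github.com/nxadm/tail"
)

func main() {
	// ReOpen gives `tail -F` semantics (reopen recreated files) and requires Follow.
	t, err := tail.TailFile("/var/log/nginx.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		panic(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```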
diff --git a/vendor/github.com/nxadm/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml
new file mode 100644
index 000000000..e149bc62d
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/appveyor.yml
@@ -0,0 +1,11 @@
+version: 0.{build}
+skip_tags: true
+cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
+build_script:
+- SET GOPATH=c:\workspace
+- go test -v -race ./...
+test: off
+clone_folder: c:\workspace\src\github.com\nxadm\tail
+branches:
+ only:
+ - master
diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod
new file mode 100644
index 000000000..fb10d42af
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/go.mod
@@ -0,0 +1,9 @@
+module github.com/nxadm/tail
+
+go 1.13
+
+require (
+ github.com/fsnotify/fsnotify v1.4.7
+ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
+)
diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum
new file mode 100644
index 000000000..b391f1904
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/go.sum
@@ -0,0 +1,6 @@
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence
new file mode 100644
index 000000000..434aab19f
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/Licence
@@ -0,0 +1,7 @@
+Copyright (C) 2013 99designs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
new file mode 100644
index 000000000..358b69e7f
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
@@ -0,0 +1,97 @@
+// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
+package ratelimiter
+
+import (
+ "time"
+)
+
+type LeakyBucket struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+ Now func() time.Time
+}
+
+func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: size,
+ Fill: 0,
+ LeakInterval: leakInterval,
+ Now: time.Now,
+ Lastupdate: time.Now(),
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucket) updateFill() {
+ now := b.Now()
+ if b.Fill > 0 {
+ elapsed := now.Sub(b.Lastupdate)
+
+ b.Fill -= float64(elapsed) / float64(b.LeakInterval)
+ if b.Fill < 0 {
+ b.Fill = 0
+ }
+ }
+ b.Lastupdate = now
+}
+
+func (b *LeakyBucket) Pour(amount uint16) bool {
+ b.updateFill()
+
+ var newfill float64 = b.Fill + float64(amount)
+
+ if newfill > float64(b.Size) {
+ return false
+ }
+
+ b.Fill = newfill
+
+ return true
+}
+
+// The time at which this bucket will be completely drained
+func (b *LeakyBucket) DrainedAt() time.Time {
+ return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
+}
+
+// The duration until this bucket is completely drained
+func (b *LeakyBucket) TimeToDrain() time.Duration {
+ return b.DrainedAt().Sub(b.Now())
+}
+
+func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
+ return b.Now().Sub(b.Lastupdate)
+}
+
+type LeakyBucketSer struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+}
+
+func (b *LeakyBucket) Serialise() *LeakyBucketSer {
+ bucket := LeakyBucketSer{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ Now: time.Now,
+ }
+
+ return &bucket
+}
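A small sketch of the leaky-bucket behaviour implemented above; the capacity, interval, and sleep are illustrative values.

```go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// Capacity 2, leaking one unit every 100ms.
	bucket := ratelimiter.NewLeakyBucket(2, 100*time.Millisecond)

	fmt.Println(bucket.Pour(1)) // true  (fill 1/2)
	fmt.Println(bucket.Pour(1)) // true  (fill 2/2)
	fmt.Println(bucket.Pour(1)) // false (would overflow)

	time.Sleep(150 * time.Millisecond)
	fmt.Println(bucket.Pour(1)) // true, part of the fill has leaked away
}
```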
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go
new file mode 100644
index 000000000..bf3c2131b
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/memory.go
@@ -0,0 +1,60 @@
+package ratelimiter
+
+import (
+ "errors"
+ "time"
+)
+
+const (
+ GC_SIZE int = 100
+ GC_PERIOD time.Duration = 60 * time.Second
+)
+
+type Memory struct {
+ store map[string]LeakyBucket
+ lastGCCollected time.Time
+}
+
+func NewMemory() *Memory {
+ m := new(Memory)
+ m.store = make(map[string]LeakyBucket)
+ m.lastGCCollected = time.Now()
+ return m
+}
+
+func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
+
+ bucket, ok := m.store[key]
+ if !ok {
+ return nil, errors.New("miss")
+ }
+
+ return &bucket, nil
+}
+
+func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
+
+ if len(m.store) > GC_SIZE {
+ m.GarbageCollect()
+ }
+
+ m.store[key] = bucket
+
+ return nil
+}
+
+func (m *Memory) GarbageCollect() {
+ now := time.Now()
+
+ // rate limit GC to once per minute
+ if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
+ for key, bucket := range m.store {
+ // if the bucket is drained, then GC
+ if bucket.DrainedAt().Unix() < now.Unix() {
+ delete(m.store, key)
+ }
+ }
+
+ m.lastGCCollected = now
+ }
+}
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go
new file mode 100644
index 000000000..89b2fe882
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/storage.go
@@ -0,0 +1,6 @@
+package ratelimiter
+
+type Storage interface {
+ GetBucketFor(string) (*LeakyBucket, error)
+ SetBucketFor(string, LeakyBucket) error
+}
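The Memory backend added above satisfies this Storage interface. A brief sketch; the key name and bucket parameters are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// *Memory implements Storage.
	var store ratelimiter.Storage = ratelimiter.NewMemory()

	bucket := ratelimiter.NewLeakyBucket(5, time.Second)
	bucket.Pour(3)

	if err := store.SetBucketFor("client-42", *bucket); err != nil {
		panic(err)
	}
	got, err := store.GetBucketFor("client-42")
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Fill) // 3
}
```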
diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go
new file mode 100644
index 000000000..58d3c4b95
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail.go
@@ -0,0 +1,440 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package tail
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/nxadm/tail/ratelimiter"
+ "github.com/nxadm/tail/util"
+ "github.com/nxadm/tail/watch"
+ "gopkg.in/tomb.v1"
+)
+
+var (
+ ErrStop = errors.New("tail should now stop")
+)
+
+type Line struct {
+ Text string
+ Num int
+ SeekInfo SeekInfo
+ Time time.Time
+ Err error // Error from tail
+}
+
+// NewLine returns a Line with present time.
+func NewLine(text string, lineNum int) *Line {
+ return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
+}
+
+// SeekInfo represents arguments to `io.Seek`
+type SeekInfo struct {
+ Offset int64
+ Whence int // io.Seek*
+}
+
+type logger interface {
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+ Fatalln(v ...interface{})
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+ Panicln(v ...interface{})
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// Config is used to specify how a file must be tailed.
+type Config struct {
+ // File-specific
+ Location *SeekInfo // Seek to this location before tailing
+ ReOpen bool // Reopen recreated files (tail -F)
+ MustExist bool // Fail early if the file does not exist
+ Poll bool // Poll for file changes instead of using inotify
+ Pipe bool // Is a named pipe (mkfifo)
+ RateLimiter *ratelimiter.LeakyBucket
+
+ // Generic IO
+ Follow bool // Continue looking for new lines (tail -f)
+ MaxLineSize int // If non-zero, split longer lines into multiple lines
+
+ // Logger, when nil, is set to tail.DefaultLogger
+ // To disable logging: set field to tail.DiscardingLogger
+ Logger logger
+}
+
+type Tail struct {
+ Filename string
+ Lines chan *Line
+ Config
+
+ file *os.File
+ reader *bufio.Reader
+ lineNum int
+
+ watcher watch.FileWatcher
+ changes *watch.FileChanges
+
+ tomb.Tomb // provides: Done, Kill, Dying
+
+ lk sync.Mutex
+}
+
+var (
+ // DefaultLogger is used when Config.Logger == nil
+ DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+ // DiscardingLogger can be used to disable logging output
+ DiscardingLogger = log.New(ioutil.Discard, "", 0)
+)
+
+// TailFile begins tailing the file. Output stream is made available
+// via the `Tail.Lines` channel. To handle errors during tailing,
+// invoke the `Wait` or `Err` method after finishing reading from the
+// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) {
+ if config.ReOpen && !config.Follow {
+ util.Fatal("cannot set ReOpen without Follow.")
+ }
+
+ t := &Tail{
+ Filename: filename,
+ Lines: make(chan *Line),
+ Config: config,
+ }
+
+ // when Logger was not specified in config, use default logger
+ if t.Logger == nil {
+ t.Logger = DefaultLogger
+ }
+
+ if t.Poll {
+ t.watcher = watch.NewPollingFileWatcher(filename)
+ } else {
+ t.watcher = watch.NewInotifyFileWatcher(filename)
+ }
+
+ if t.MustExist {
+ var err error
+ t.file, err = OpenFile(t.Filename)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ go t.tailFileSync()
+
+ return t, nil
+}
+
+// Tell returns the file's current position, like stdio's ftell().
+// Note that this value is only approximate: one line may already
+// have been read from the tail.Lines channel, so the reported
+// offset can be off by one line.
+func (tail *Tail) Tell() (offset int64, err error) {
+ if tail.file == nil {
+ return
+ }
+ offset, err = tail.file.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return
+ }
+
+ tail.lk.Lock()
+ defer tail.lk.Unlock()
+ if tail.reader == nil {
+ return
+ }
+
+ offset -= int64(tail.reader.Buffered())
+ return
+}
+
+// Stop stops the tailing activity.
+func (tail *Tail) Stop() error {
+ tail.Kill(nil)
+ return tail.Wait()
+}
+
+// StopAtEOF stops tailing as soon as the end of the file is reached.
+func (tail *Tail) StopAtEOF() error {
+ tail.Kill(errStopAtEOF)
+ return tail.Wait()
+}
+
+var errStopAtEOF = errors.New("tail: stop at eof")
+
+func (tail *Tail) close() {
+ close(tail.Lines)
+ tail.closeFile()
+}
+
+func (tail *Tail) closeFile() {
+ if tail.file != nil {
+ tail.file.Close()
+ tail.file = nil
+ }
+}
+
+func (tail *Tail) reopen() error {
+ tail.closeFile()
+ tail.lineNum = 0
+ for {
+ var err error
+ tail.file, err = OpenFile(tail.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
+ if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
+ if err == tomb.ErrDying {
+ return err
+ }
+ return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
+ }
+ continue
+ }
+ return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
+ }
+ break
+ }
+ return nil
+}
+
+func (tail *Tail) readLine() (string, error) {
+ tail.lk.Lock()
+ line, err := tail.reader.ReadString('\n')
+ tail.lk.Unlock()
+ if err != nil {
+ // Note ReadString "returns the data read before the error" in
+ // case of an error, including EOF, so we return it as is. The
+ // caller is expected to process it if err is EOF.
+ return line, err
+ }
+
+ line = strings.TrimRight(line, "\n")
+
+ return line, err
+}
+
+func (tail *Tail) tailFileSync() {
+ defer tail.Done()
+ defer tail.close()
+
+ if !tail.MustExist {
+ // deferred first open.
+ err := tail.reopen()
+ if err != nil {
+ if err != tomb.ErrDying {
+ tail.Kill(err)
+ }
+ return
+ }
+ }
+
+ // Seek to requested location on first open of the file.
+ if tail.Location != nil {
+ _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
+ if err != nil {
+ tail.Killf("Seek error on %s: %s", tail.Filename, err)
+ return
+ }
+ }
+
+ tail.openReader()
+
+ // Read line by line.
+ for {
+ // do not seek in named pipes
+ if !tail.Pipe {
+ // grab the position in case we need to back up in the event of a half-line
+ if _, err := tail.Tell(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ line, err := tail.readLine()
+
+ // Process `line` even if err is EOF.
+ if err == nil {
+ cooloff := !tail.sendLine(line)
+ if cooloff {
+ // Wait a second before seeking till the end of
+ // file when rate limit is reached.
+ msg := ("Too much log activity; waiting a second before resuming tailing")
+ offset, _ := tail.Tell()
+ tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
+ select {
+ case <-time.After(time.Second):
+ case <-tail.Dying():
+ return
+ }
+ if err := tail.seekEnd(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+ } else if err == io.EOF {
+ if !tail.Follow {
+ if line != "" {
+ tail.sendLine(line)
+ }
+ return
+ }
+
+ if tail.Follow && line != "" {
+ tail.sendLine(line)
+ if err := tail.seekEnd(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ // When EOF is reached, wait for more data to become
+ // available. Wait strategy is based on the `tail.watcher`
+ // implementation (inotify or polling).
+ err := tail.waitForChanges()
+ if err != nil {
+ if err != ErrStop {
+ tail.Kill(err)
+ }
+ return
+ }
+ } else {
+ // non-EOF error
+ tail.Killf("Error reading %s: %s", tail.Filename, err)
+ return
+ }
+
+ select {
+ case <-tail.Dying():
+ if tail.Err() == errStopAtEOF {
+ continue
+ }
+ return
+ default:
+ }
+ }
+}
+
+// waitForChanges waits until the file has been appended, deleted,
+// moved or truncated. When the file is moved or deleted, it is
+// reopened only if ReOpen is true. Truncated files are always reopened.
+func (tail *Tail) waitForChanges() error {
+ if tail.changes == nil {
+ pos, err := tail.file.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
+ if err != nil {
+ return err
+ }
+ }
+
+ select {
+ case <-tail.changes.Modified:
+ return nil
+ case <-tail.changes.Deleted:
+ tail.changes = nil
+ if tail.ReOpen {
+ // XXX: we must not log from a library.
+ tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened %s", tail.Filename)
+ tail.openReader()
+ return nil
+ }
+ tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
+ return ErrStop
+ case <-tail.changes.Truncated:
+ // Always reopen truncated files (Follow is true)
+ tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
+ tail.openReader()
+ return nil
+ case <-tail.Dying():
+ return ErrStop
+ }
+}
+
+func (tail *Tail) openReader() {
+ tail.lk.Lock()
+ if tail.MaxLineSize > 0 {
+ // add 2 to account for newline characters
+ tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
+ } else {
+ tail.reader = bufio.NewReader(tail.file)
+ }
+ tail.lk.Unlock()
+}
+
+func (tail *Tail) seekEnd() error {
+ return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
+}
+
+func (tail *Tail) seekTo(pos SeekInfo) error {
+ _, err := tail.file.Seek(pos.Offset, pos.Whence)
+ if err != nil {
+ return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
+ }
+ // Reset the read buffer whenever the file is re-seek'ed
+ tail.reader.Reset(tail.file)
+ return nil
+}
+
+// sendLine sends the line(s) to the Lines channel, splitting longer
+// lines if necessary. It returns false if the rate limit is reached.
+func (tail *Tail) sendLine(line string) bool {
+ now := time.Now()
+ lines := []string{line}
+
+ // Split longer lines
+ if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
+ lines = util.PartitionString(line, tail.MaxLineSize)
+ }
+
+ for _, line := range lines {
+ tail.lineNum++
+ offset, _ := tail.Tell()
+ select {
+ case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
+ case <-tail.Dying():
+ return true
+ }
+ }
+
+ if tail.Config.RateLimiter != nil {
+ ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
+ if !ok {
+ tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
+ tail.Filename)
+ return false
+ }
+ }
+
+ return true
+}
+
+// Cleanup removes inotify watches added by the tail package. This function is
+// meant to be invoked from a process's exit handler. The Linux kernel may not
+// automatically remove inotify watches after the process exits.
+func (tail *Tail) Cleanup() {
+ watch.Cleanup(tail.Filename)
+}
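
Putting the exported pieces of this file together, a caller typically drives the package like this. This is a minimal sketch based only on the names above (TailFile, Config, Lines, Err, Cleanup); the log path is illustrative:

    package main

    import (
            "fmt"

            "github.com/nxadm/tail"
    )

    func main() {
            // Follow the file like `tail -F`: keep reading new lines and
            // reopen the file if it is rotated or recreated.
            t, err := tail.TailFile("/var/log/example.log", tail.Config{Follow: true, ReOpen: true})
            if err != nil {
                    panic(err)
            }
            defer t.Cleanup() // drop inotify watches added on our behalf

            for line := range t.Lines {
                    fmt.Println(line.Text)
            }
            // Err reports any tailing error once Lines has been drained.
            if err := t.Err(); err != nil {
                    fmt.Println("tail error:", err)
            }
    }
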
diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go
new file mode 100644
index 000000000..1b94520ec
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail_posix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package tail
+
+import (
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return os.Open(name)
+}
diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go
new file mode 100644
index 000000000..4aaceea28
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package tail
+
+import (
+ "github.com/nxadm/tail/winfile"
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return winfile.OpenFile(name, os.O_RDONLY, 0)
+}
diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go
new file mode 100644
index 000000000..2ba0ed71c
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/util/util.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package util
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "runtime/debug"
+)
+
+type Logger struct {
+ *log.Logger
+}
+
+var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
+
+// Fatal is like panic except it displays only the current goroutine's stack.
+func Fatal(format string, v ...interface{}) {
+ // https://github.com/nxadm/log/blob/master/log.go#L45
+ LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
+ os.Exit(1)
+}
+
+// PartitionString partitions the string into chunks of the given size,
+// with the last chunk of variable size.
+func PartitionString(s string, chunkSize int) []string {
+ if chunkSize <= 0 {
+ panic("invalid chunkSize")
+ }
+ length := len(s)
+ chunks := 1 + length/chunkSize
+ start := 0
+ end := chunkSize
+ parts := make([]string, 0, chunks)
+ for {
+ if end > length {
+ end = length
+ }
+ parts = append(parts, s[start:end])
+ if end == length {
+ break
+ }
+ start, end = end, end+chunkSize
+ }
+ return parts
+}
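
As a concrete illustration of the chunking behaviour documented above (not part of the vendored code):

    package main

    import (
            "fmt"

            "github.com/nxadm/tail/util"
    )

    func main() {
            // Eight characters split into chunks of three; the last chunk is shorter.
            fmt.Println(util.PartitionString("abcdefgh", 3)) // [abc def gh]
    }
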
diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go
new file mode 100644
index 000000000..f80aead9a
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/filechanges.go
@@ -0,0 +1,36 @@
+package watch
+
+type FileChanges struct {
+ Modified chan bool // Channel to get notified of modifications
+ Truncated chan bool // Channel to get notified of truncations
+ Deleted chan bool // Channel to get notified of deletions/renames
+}
+
+func NewFileChanges() *FileChanges {
+ return &FileChanges{
+ make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
+}
+
+func (fc *FileChanges) NotifyModified() {
+ sendOnlyIfEmpty(fc.Modified)
+}
+
+func (fc *FileChanges) NotifyTruncated() {
+ sendOnlyIfEmpty(fc.Truncated)
+}
+
+func (fc *FileChanges) NotifyDeleted() {
+ sendOnlyIfEmpty(fc.Deleted)
+}
+
+// sendOnlyIfEmpty sends on a bool channel only if the channel has no
+// backlog to be read by other goroutines. This concurrency pattern
+// can be used to notify other goroutines if and only if they are
+// looking for it (i.e., subsequent notifications can be compressed
+// into one).
+func sendOnlyIfEmpty(ch chan bool) {
+ select {
+ case ch <- true:
+ default:
+ }
+}
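
The sendOnlyIfEmpty pattern above is a small, reusable idiom: a buffered channel of size one in which repeated notifications coalesce into a single pending one. A standalone sketch of the same idea:

    package main

    import "fmt"

    func main() {
            ch := make(chan bool, 1)
            notify := func() {
                    select {
                    case ch <- true:
                    default: // a notification is already pending; drop this one
                    }
            }

            notify()
            notify() // coalesced with the first notification
            fmt.Println(len(ch)) // 1
    }
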
diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go
new file mode 100644
index 000000000..439921810
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/inotify.go
@@ -0,0 +1,135 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/nxadm/tail/util"
+
+ "github.com/fsnotify/fsnotify"
+ "gopkg.in/tomb.v1"
+)
+
+// InotifyFileWatcher uses inotify to monitor file changes.
+type InotifyFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
+ fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
+ return fw
+}
+
+func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ err := WatchCreate(fw.Filename)
+ if err != nil {
+ return err
+ }
+ defer RemoveWatchCreate(fw.Filename)
+
+ // Do a real check now as the file might have been created before
+ // calling `WatchCreate` above.
+ if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
+ // file exists, or stat returned an error.
+ return err
+ }
+
+ events := Events(fw.Filename)
+
+ for {
+ select {
+ case evt, ok := <-events:
+ if !ok {
+ return fmt.Errorf("inotify watcher has been closed")
+ }
+ evtName, err := filepath.Abs(evt.Name)
+ if err != nil {
+ return err
+ }
+ fwFilename, err := filepath.Abs(fw.Filename)
+ if err != nil {
+ return err
+ }
+ if evtName == fwFilename {
+ return nil
+ }
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ err := Watch(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ fw.Size = pos
+
+ go func() {
+
+ events := Events(fw.Filename)
+
+ for {
+ prevSize := fw.Size
+
+ var evt fsnotify.Event
+ var ok bool
+
+ select {
+ case evt, ok = <-events:
+ if !ok {
+ RemoveWatch(fw.Filename)
+ return
+ }
+ case <-t.Dying():
+ RemoveWatch(fw.Filename)
+ return
+ }
+
+ switch {
+ case evt.Op&fsnotify.Remove == fsnotify.Remove:
+ fallthrough
+
+ case evt.Op&fsnotify.Rename == fsnotify.Rename:
+ RemoveWatch(fw.Filename)
+ changes.NotifyDeleted()
+ return
+
+ // With the fd still open, unlinking the file makes inotify report IN_ATTRIB (== fsnotify.Chmod)
+ case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
+ fallthrough
+
+ case evt.Op&fsnotify.Write == fsnotify.Write:
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ RemoveWatch(fw.Filename)
+ changes.NotifyDeleted()
+ return
+ }
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+ fw.Size = fi.Size()
+
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ } else {
+ changes.NotifyModified()
+ }
+ prevSize = fw.Size
+ }
+ }
+ }()
+
+ return changes, nil
+}
diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
new file mode 100644
index 000000000..a94bcd4cb
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
@@ -0,0 +1,248 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+
+ "github.com/nxadm/tail/util"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+type InotifyTracker struct {
+ mux sync.Mutex
+ watcher *fsnotify.Watcher
+ chans map[string]chan fsnotify.Event
+ done map[string]chan bool
+ watchNums map[string]int
+ watch chan *watchInfo
+ remove chan *watchInfo
+ error chan error
+}
+
+type watchInfo struct {
+ op fsnotify.Op
+ fname string
+}
+
+func (this *watchInfo) isCreate() bool {
+ return this.op == fsnotify.Create
+}
+
+var (
+ // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
+ shared *InotifyTracker
+
+ // these are used to ensure the shared InotifyTracker is run exactly once
+ once = sync.Once{}
+ goRun = func() {
+ shared = &InotifyTracker{
+ mux: sync.Mutex{},
+ chans: make(map[string]chan fsnotify.Event),
+ done: make(map[string]chan bool),
+ watchNums: make(map[string]int),
+ watch: make(chan *watchInfo),
+ remove: make(chan *watchInfo),
+ error: make(chan error),
+ }
+ go shared.run()
+ }
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+// Watch signals the run goroutine to begin watching the input filename
+func Watch(fname string) error {
+ return watch(&watchInfo{
+ fname: fname,
+ })
+}
+
+// WatchCreate signals the run goroutine to begin watching for creation of the input filename.
+// If you call WatchCreate, do not call Cleanup afterwards; call RemoveWatchCreate instead.
+func WatchCreate(fname string) error {
+ return watch(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func watch(winfo *watchInfo) error {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.watch <- winfo
+ return <-shared.error
+}
+
+// RemoveWatch signals the run goroutine to remove the watch for the input filename
+func RemoveWatch(fname string) error {
+ return remove(&watchInfo{
+ fname: fname,
+ })
+}
+
+// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename
+func RemoveWatchCreate(fname string) error {
+ return remove(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func remove(winfo *watchInfo) error {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.mux.Lock()
+ done := shared.done[winfo.fname]
+ if done != nil {
+ delete(shared.done, winfo.fname)
+ close(done)
+ }
+ shared.mux.Unlock()
+
+ shared.remove <- winfo
+ return <-shared.error
+}
+
+// Events returns a channel to which fsnotify events corresponding to the input filename
+// will be sent. This channel will be closed when RemoveWatch is called on this
+// filename.
+func Events(fname string) <-chan fsnotify.Event {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ return shared.chans[fname]
+}
+
+// Cleanup removes the watch for the input filename if necessary.
+func Cleanup(fname string) error {
+ return RemoveWatch(fname)
+}
+
+// addWatch registers the input filename with the shared fsnotify Watcher,
+// creating the per-file event and done channels if necessary.
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ if shared.chans[winfo.fname] == nil {
+ shared.chans[winfo.fname] = make(chan fsnotify.Event)
+ }
+ if shared.done[winfo.fname] == nil {
+ shared.done[winfo.fname] = make(chan bool)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+
+ var err error
+ // add the path to the inotify watcher only if it is not already being watched
+ if shared.watchNums[fname] == 0 {
+ err = shared.watcher.Add(fname)
+ }
+ if err == nil {
+ shared.watchNums[fname]++
+ }
+ return err
+}
+
+// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
+// corresponding events channel.
+func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
+ shared.mux.Lock()
+
+ ch := shared.chans[winfo.fname]
+ if ch != nil {
+ delete(shared.chans, winfo.fname)
+ close(ch)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+ shared.watchNums[fname]--
+ watchNum := shared.watchNums[fname]
+ if watchNum == 0 {
+ delete(shared.watchNums, fname)
+ }
+ shared.mux.Unlock()
+
+ var err error
+ // If we were the last ones to watch this file, unsubscribe from inotify.
+ // This needs to happen after releasing the lock because fsnotify waits
+ // synchronously for the kernel to acknowledge the removal of the watch
+ // for this file, which causes us to deadlock if we still held the lock.
+ if watchNum == 0 {
+ err = shared.watcher.Remove(fname)
+ }
+
+ return err
+}
+
+// sendEvent sends the input event to the appropriate Tail.
+func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
+ name := filepath.Clean(event.Name)
+
+ shared.mux.Lock()
+ ch := shared.chans[name]
+ done := shared.done[name]
+ shared.mux.Unlock()
+
+ if ch != nil && done != nil {
+ select {
+ case ch <- event:
+ case <-done:
+ }
+ }
+}
+
+// run starts the goroutine in which the shared struct reads events from its
+// Watcher's Event channel and sends the events to the appropriate Tail.
+func (shared *InotifyTracker) run() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ util.Fatal("failed to create Watcher")
+ }
+ shared.watcher = watcher
+
+ for {
+ select {
+ case winfo := <-shared.watch:
+ shared.error <- shared.addWatch(winfo)
+
+ case winfo := <-shared.remove:
+ shared.error <- shared.removeWatch(winfo)
+
+ case event, open := <-shared.watcher.Events:
+ if !open {
+ return
+ }
+ shared.sendEvent(event)
+
+ case err, open := <-shared.watcher.Errors:
+ if !open {
+ return
+ } else if err != nil {
+ sysErr, ok := err.(*os.SyscallError)
+ if !ok || sysErr.Err != syscall.EINTR {
+ logger.Printf("Error in Watcher Error channel: %s", err)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go
new file mode 100644
index 000000000..fb1706908
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/polling.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/nxadm/tail/util"
+ "gopkg.in/tomb.v1"
+)
+
+// PollingFileWatcher polls the file for changes.
+type PollingFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewPollingFileWatcher(filename string) *PollingFileWatcher {
+ fw := &PollingFileWatcher{filename, 0}
+ return fw
+}
+
+var POLL_DURATION time.Duration
+
+func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ for {
+ if _, err := os.Stat(fw.Filename); err == nil {
+ return nil
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ select {
+ case <-time.After(POLL_DURATION):
+ continue
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ origFi, err := os.Stat(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ var prevModTime time.Time
+
+ // XXX: use tomb.Tomb to cleanly manage these goroutines. replace
+ // the fatal (below) with tomb's Kill.
+
+ fw.Size = pos
+
+ go func() {
+ prevSize := fw.Size
+ for {
+ select {
+ case <-t.Dying():
+ return
+ default:
+ }
+
+ time.Sleep(POLL_DURATION)
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ // Windows cannot delete a file if a handle is still open (tail keeps one open)
+ // so it gives access denied to anything trying to read it until all handles are released.
+ if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
+ // File does not exist (has been deleted).
+ changes.NotifyDeleted()
+ return
+ }
+
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+
+ // File got moved/renamed?
+ if !os.SameFile(origFi, fi) {
+ changes.NotifyDeleted()
+ return
+ }
+
+ // File got truncated?
+ fw.Size = fi.Size()
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ prevSize = fw.Size
+ continue
+ }
+ // File got bigger?
+ if prevSize > 0 && prevSize < fw.Size {
+ changes.NotifyModified()
+ prevSize = fw.Size
+ continue
+ }
+ prevSize = fw.Size
+
+ // File was appended to (changed)?
+ modTime := fi.ModTime()
+ if modTime != prevModTime {
+ prevModTime = modTime
+ changes.NotifyModified()
+ }
+ }
+ }()
+
+ return changes, nil
+}
+
+func init() {
+ POLL_DURATION = 250 * time.Millisecond
+}
diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go
new file mode 100644
index 000000000..2e1783ef0
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/watch.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import "gopkg.in/tomb.v1"
+
+// FileWatcher monitors file-level events.
+type FileWatcher interface {
+ // BlockUntilExists blocks until the file comes into existence.
+ BlockUntilExists(*tomb.Tomb) error
+
+ // ChangeEvents reports on changes to a file, be it modification,
+ // deletion, rename or truncation. The returned FileChanges group of
+ // channels will be closed, and thus become unusable, after a deletion
+ // or truncation event.
+ // In order to properly report truncations, ChangeEvents requires
+ // the caller to pass their current offset in the file.
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
+}
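
Both watcher implementations added in this change (InotifyFileWatcher and PollingFileWatcher) satisfy this interface, which is easy to assert at compile time in a consumer's own code (an illustrative sketch, not part of the vendored code):

    package main

    import "github.com/nxadm/tail/watch"

    // Compile-time checks that both watchers implement FileWatcher.
    var (
            _ watch.FileWatcher = (*watch.InotifyFileWatcher)(nil)
            _ watch.FileWatcher = (*watch.PollingFileWatcher)(nil)
    )

    func main() {}
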
diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go
new file mode 100644
index 000000000..aa7e7bc5d
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/winfile/winfile.go
@@ -0,0 +1,92 @@
+// +build windows
+
+package winfile
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// The issue is also described here:
+// https://codereview.appspot.com/8203043/
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
+func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_CREAT != 0 {
+ access |= syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_APPEND != 0 {
+ access &^= syscall.GENERIC_WRITE
+ access |= syscall.FILE_APPEND_DATA
+ }
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
+ var sa *syscall.SecurityAttributes
+ if mode&syscall.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
+ createmode = syscall.CREATE_NEW
+ case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
+ createmode = syscall.CREATE_ALWAYS
+ case mode&syscall.O_CREAT == syscall.O_CREAT:
+ createmode = syscall.OPEN_ALWAYS
+ case mode&syscall.O_TRUNC == syscall.O_TRUNC:
+ createmode = syscall.TRUNCATE_EXISTING
+ default:
+ createmode = syscall.OPEN_EXISTING
+ }
+ h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ return h, e
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
+func makeInheritSa() *syscall.SecurityAttributes {
+ var sa syscall.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
+func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
+ r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
+func syscallMode(i os.FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&os.ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&os.ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&os.ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 65dc3002b..079af2431 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,7 +1,7 @@
language: go
go:
- - 1.12.x
- 1.13.x
+ - 1.14.x
- tip
cache:
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 84b479404..52d8ca36d 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,27 @@
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo succesful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
## 1.12.0
### Features
@@ -208,7 +232,7 @@ New Features:
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes any spec failures that occured at the end of the test run.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index cdf8d054a..2fda7b9eb 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,29 +1,29 @@
-![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
-Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
## Feature List
-- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
+- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
-- Structure your BDD-style tests expressively:
- - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
+- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
+ - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
+ - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
+ - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
+ - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
+ - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- A comprehensive test runner that lets you:
- - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+ - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
+ - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+ - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+ - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)
-- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
- `ginkgo -cover` runs your tests using Go's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
@@ -37,27 +37,27 @@ If you have a question, comment, bug report, feature request, etc. please open a
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
-- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)
-- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
+- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
-- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.
- A modular architecture that lets you easily:
- - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+ - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+ - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
-## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
-Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
+Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)
-## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
+## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework
-Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
+Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)
## Set Me Up!
@@ -87,16 +87,16 @@ With that said, it's great to know what your options are :)
### What Go gives you out of the box
-Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
+Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
### Matcher libraries for Go's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
- [testify](https://github.com/stretchr/testify)
-- [gocheck](http://labix.org/gocheck)
+- [gocheck](https://labix.org/gocheck)
-You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
+You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
### BDD style testing frameworks
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 949f8130f..342645022 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.12.0"
+const VERSION = "1.12.2"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
index f0eb375c3..2fddef0f7 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
@@ -57,8 +57,6 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
} else {
fmt.Printf(" compiled %s.test\n", suite.PackageName)
}
-
- runner.CleanUp()
}
if passed {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
index 5e00d5618..363e52fe2 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
@@ -33,7 +33,8 @@ func RewritePackage(packageName string) {
*/
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
- testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
+ testfile, _ := filepath.Abs(filepath.Join(pkg.Dir, file))
+ testfiles = append(testfiles, testfile)
}
dirFiles, err := ioutil.ReadDir(pkg.Dir)
@@ -103,10 +104,11 @@ func addGinkgoSuiteForPackage(pkg *build.Package) {
* Shells out to `go fmt` to format the package
*/
func goFmtPackage(pkg *build.Package) {
- output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
+ path, _ := filepath.Abs(pkg.ImportPath)
+ output, err := exec.Command("go", "fmt", path).CombinedOutput()
if err != nil {
- fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
+ fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", path, output, err.Error())
}
}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
index d415050ef..60c73504a 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
@@ -22,7 +22,7 @@ import (
*/
func rewriteTestsInFile(pathToFile string) {
fileSet := token.NewFileSet()
- rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
+ rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, parser.ParseComments)
if err != nil {
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
}
@@ -56,11 +56,12 @@ func rewriteTestsInFile(pathToFile string) {
}
fileInfo, err := os.Stat(pathToFile)
+
if err != nil {
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
}
- ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
+ err = ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
}
/*
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
index 1d06e08fd..f225d272f 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
@@ -179,19 +179,24 @@ func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) erro
for index, runner := range runners {
contents, err := ioutil.ReadFile(runner.CoverageFile)
+ if err != nil {
+ fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
+ return nil // non-fatal error
+ }
+
// remove the cover mode line from every file
// except the first one
if index > 0 {
contents = modeRegex.ReplaceAll(contents, []byte{})
}
- if err != nil {
- fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
- return nil // non-fatal error
- }
-
_, err = combined.Write(contents)
+ // Add a newline to the end of every file if missing.
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
+ _, err = combined.Write([]byte("\n"))
+ }
+
if err != nil {
fmt.Printf("Unable to append to coverprofile, %v\n", err)
return nil // non-fatal error
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
index 80670d24a..66c0f06f6 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
@@ -49,11 +49,7 @@ func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout tim
}
if !suite.Precompiled {
- dir, err := ioutil.TempDir("", "ginkgo")
- if err != nil {
- panic(fmt.Sprintf("couldn't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
- }
- runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+ runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
}
return runner
@@ -248,7 +244,7 @@ func (t *TestRunner) CleanUp() {
if t.Suite.Precompiled {
return
}
- os.RemoveAll(filepath.Dir(t.compilationTargetPath))
+ os.Remove(t.compilationTargetPath)
}
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
index cedc2b59c..d9dfb6e44 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
@@ -1,11 +1,18 @@
package main
import (
+ "bytes"
"flag"
"fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
"io/ioutil"
- "os/exec"
+ "os"
+ "path/filepath"
"strings"
+ "sync"
)
func BuildUnfocusCommand() *Command {
@@ -22,40 +29,152 @@ func BuildUnfocusCommand() *Command {
}
func unfocusSpecs([]string, []string) {
- unfocus("Describe")
- unfocus("Context")
- unfocus("It")
- unfocus("Measure")
- unfocus("DescribeTable")
- unfocus("Entry")
- unfocus("Specify")
- unfocus("When")
-}
-
-func unfocus(component string) {
- fmt.Printf("Removing F%s...\n", component)
- files, err := ioutil.ReadDir(".")
+ fmt.Println("Scanning for focus...")
+
+ goFiles := make(chan string)
+ go func() {
+ unfocusDir(goFiles, ".")
+ close(goFiles)
+ }()
+
+ const workers = 10
+ wg := sync.WaitGroup{}
+ wg.Add(workers)
+
+ for i := 0; i < workers; i++ {
+ go func() {
+ for path := range goFiles {
+ unfocusFile(path)
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func unfocusDir(goFiles chan string, path string) {
+ files, err := ioutil.ReadDir(path)
if err != nil {
fmt.Println(err.Error())
return
}
+
for _, f := range files {
- // Exclude "vendor" directory
- if f.IsDir() && f.Name() == "vendor" {
- continue
+ switch {
+ case f.IsDir() && shouldProcessDir(f.Name()):
+ unfocusDir(goFiles, filepath.Join(path, f.Name()))
+ case !f.IsDir() && shouldProcessFile(f.Name()):
+ goFiles <- filepath.Join(path, f.Name())
}
- // Exclude non-go files in the current directory
- if !f.IsDir() && !strings.HasSuffix(f.Name(), ".go") {
- continue
+ }
+}
+
+func shouldProcessDir(basename string) bool {
+ return basename != "vendor" && !strings.HasPrefix(basename, ".")
+}
+
+func shouldProcessFile(basename string) bool {
+ return strings.HasSuffix(basename, ".go")
+}
+
+func unfocusFile(path string) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ fmt.Printf("error reading file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), 0)
+ if err != nil {
+ fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ eliminations := scanForFocus(ast)
+ if len(eliminations) == 0 {
+ return
+ }
+
+ fmt.Printf("...updating %s\n", path)
+ backup, err := writeBackup(path, data)
+ if err != nil {
+ fmt.Printf("error creating backup file: %s\n", err.Error())
+ return
+ }
+
+ if err := updateFile(path, data, eliminations); err != nil {
+ fmt.Printf("error writing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ os.Remove(backup)
+}
+
+func writeBackup(path string, data []byte) (string, error) {
+ t, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path))
+
+ if err != nil {
+ return "", fmt.Errorf("error creating temporary file: %w", err)
+ }
+ defer t.Close()
+
+ if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
+ return "", fmt.Errorf("error writing to temporary file: %w", err)
+ }
+
+ return t.Name(), nil
+}
+
+func updateFile(path string, data []byte, eliminations []int64) error {
+ to, err := os.Create(path)
+ if err != nil {
+ return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
+ }
+ defer to.Close()
+
+ from := bytes.NewReader(data)
+ var cursor int64
+ for _, byteToEliminate := range eliminations {
+ if _, err := io.CopyN(to, from, byteToEliminate-cursor); err != nil {
+ return fmt.Errorf("error copying data: %w", err)
}
- // Recursively run `gofmt` otherwise
- cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", f.Name())
- out, err := cmd.CombinedOutput()
- if err != nil {
- fmt.Println(err.Error())
+
+ cursor = byteToEliminate + 1
+
+ if _, err := from.Seek(1, io.SeekCurrent); err != nil {
+ return fmt.Errorf("error seeking to position in buffer: %w", err)
}
- if string(out) != "" {
- fmt.Println(string(out))
+ }
+
+ if _, err := io.Copy(to, from); err != nil {
+ return fmt.Errorf("error copying end data: %w", err)
+ }
+
+ return nil
+}
+
+func scanForFocus(file *ast.File) (eliminations []int64) {
+ ast.Inspect(file, func(n ast.Node) bool {
+ if c, ok := n.(*ast.CallExpr); ok {
+ if i, ok := c.Fun.(*ast.Ident); ok {
+ if isFocus(i.Name) {
+ eliminations = append(eliminations, int64(i.Pos()-file.Pos()))
+ }
+ }
}
+
+ return true
+ })
+
+ return eliminations
+}
+
+func isFocus(name string) bool {
+ switch name {
+ case "FDescribe", "FContext", "FIt", "FMeasure", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
+ return true
+ default:
+ return false
}
}
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 15a4ab571..1f7125228 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -1,9 +1,11 @@
module github.com/onsi/ginkgo
require (
- github.com/hpcloud/tail v1.0.0
- github.com/onsi/gomega v1.7.1
- golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/nxadm/tail v1.4.4
+ github.com/onsi/gomega v1.10.1
+ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/text v0.3.2 // indirect
)
-go 1.12
+go 1.13
diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum
index 29adce41a..2b774f3e8 100644
--- a/vendor/github.com/onsi/ginkgo/go.sum
+++ b/vendor/github.com/onsi/ginkgo/go.sum
@@ -1,21 +1,60 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -24,3 +63,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
index f9ab30067..992437d9e 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -197,11 +197,11 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
- aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
- aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
} else {
- aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+ aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
}
case types.SpecStatePending:
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
index ab6622a29..774967db6 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
@@ -7,7 +7,8 @@ import (
"io/ioutil"
"os"
- "github.com/hpcloud/tail"
+ "github.com/nxadm/tail"
+ "golang.org/x/sys/unix"
)
func NewOutputInterceptor() OutputInterceptor {
@@ -35,12 +36,10 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
return err
}
- // Call a function in ./syscall_dup_*.go
- // If building for everything other than linux_arm64,
- // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
- // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
- syscallDup(int(interceptor.redirectFile.Fd()), 1)
- syscallDup(int(interceptor.redirectFile.Fd()), 2)
+ // This might call Dup3 if the dup2 syscall is not available, e.g. on
+ // linux/arm64 or linux/riscv64
+ unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
+ unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
if interceptor.streamTarget != nil {
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
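The interceptor now relies on golang.org/x/sys/unix.Dup2, which x/sys implements on top of dup3 where the dup2 syscall is missing (linux/arm64, linux/riscv64), so the per-architecture syscallDup shims removed below are no longer needed. A minimal, self-contained sketch of the same fd-redirection pattern:

```go
// Minimal sketch (not part of the patch): redirect stdout into a temp file
// the way ginkgo's output interceptor does, using golang.org/x/sys/unix.
// On linux/arm64 and linux/riscv64 the unix package provides Dup2 via dup3,
// so no per-arch helper files are required.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := ioutil.TempFile("", "intercept")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Duplicate the file's fd onto fd 1 (stdout); everything printed
	// afterwards lands in the temp file instead of the terminal.
	if err := unix.Dup2(int(f.Fd()), 1); err != nil {
		panic(err)
	}
	fmt.Println("captured")
}
```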
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
deleted file mode 100644
index 9550d37b3..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,arm64
-
-package remote
-
-import "syscall"
-
-// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
deleted file mode 100644
index 0d40f0a54..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,riscv64
-
-package remote
-
-import "syscall"
-
-// linux_riscv64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
deleted file mode 100644
index 75ef7fb78..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build solaris
-
-package remote
-
-import "golang.org/x/sys/unix"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return unix.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
deleted file mode 100644
index 981aa7466..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux !arm64
-// +build !linux !riscv64
-// +build !windows
-// +build !solaris
-
-package remote
-
-import "syscall"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
index 3104bbc88..34f639ee4 100644
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
@@ -102,6 +102,9 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink
}
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
+ if !suite.running {
+ return nil, false
+ }
return suite.runner.CurrentSpecSummary()
}
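The added guard means that querying the current spec while no suite is running reports "no spec" instead of dereferencing an unset runner. A hedged sketch of the observable effect through ginkgo's public v1 API (assuming CurrentGinkgoTestDescription, which wraps this internal call):

```go
// Sketch under the assumption of the ginkgo v1.12.x public API: with the
// guard in suite.go above, asking about the "current" spec when no suite is
// running yields a zero-value description rather than panicking.
package main

import (
	"fmt"

	"github.com/onsi/ginkgo"
)

func main() {
	// RunSpecs has never been started here, so the description fields are
	// simply empty values.
	desc := ginkgo.CurrentGinkgoTestDescription()
	fmt.Printf("current spec: %q (measurement: %v)\n", desc.FullTestText, desc.IsMeasurement)
}
```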
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
index c76283b46..f0c9f6141 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
@@ -57,11 +57,11 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
- reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
+ reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
- reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
+ reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
} else {
- reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
+ reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
if reporter.config.ReportPassed {
reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
index 98854e7d9..1aa5b9db0 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
@@ -105,16 +105,16 @@ func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
stenographer.registerCall("AnnounceCapturedOutput", output)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSuccesfulSpec", spec)
+func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
+ stenographer.registerCall("AnnounceSuccessfulSpec", spec)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
+func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+ stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
+func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
+ stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
}
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
index 601c74d66..638d6fbb1 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
@@ -47,9 +47,9 @@ type Stenographer interface {
AnnounceCapturedOutput(output string)
- AnnounceSuccesfulSpec(spec *types.SpecSummary)
- AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
- AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
+ AnnounceSuccessfulSpec(spec *types.SpecSummary)
+ AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
+ AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
@@ -245,12 +245,12 @@ func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
s.midBlock()
}
-func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
+func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
s.print(0, s.colorize(greenColor, s.denoter))
s.stream()
}
-func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
"",
@@ -259,7 +259,7 @@ func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary,
)
}
-func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
+func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
s.measurementReport(spec, succinct),
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 35adfb8d7..3aafdbcfc 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.10.1
+
+### Fixes
+- Update dependencies (#389) [9f5eecd]
+
## 1.10.0
### Features
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 1eb0dfa68..778935141 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -1,17 +1,9 @@
module github.com/onsi/gomega
require (
- github.com/fsnotify/fsnotify v1.4.7 // indirect
- github.com/golang/protobuf v1.2.0
- github.com/hpcloud/tail v1.0.0 // indirect
- github.com/onsi/ginkgo v1.6.0
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect
- golang.org/x/text v0.3.0 // indirect
- golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
- gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.4
+ github.com/golang/protobuf v1.4.2
+ github.com/onsi/ginkgo v1.12.1
+ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+ gopkg.in/yaml.v2 v2.3.0
)
-
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index b872e8a0d..610b09bee 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -2,20 +2,52 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -24,3 +56,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 65e837e20..8ff9611d5 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.10.0"
+const GOMEGA_VERSION = "1.10.1"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
index ba611cb21..eaf8b2f9e 100644
--- a/vendor/github.com/opencontainers/go-digest/.mailmap
+++ b/vendor/github.com/opencontainers/go-digest/.mailmap
@@ -1 +1,4 @@
+Aaron Lehmann <aaronl@vitelus.com> <aaron.lehmann@docker.com>
+Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
+Haibing Zhou <zhouhaibing089@gmail.com>
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
index 45fa4b9ec..b6165f83c 100644
--- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
+++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
@@ -1,12 +1,28 @@
-approve_by_comment: true
-approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
-reject_regex: ^Rejected
-reset_on_push: true
-author_approval: ignored
-signed_off_by:
- required: true
-reviewers:
- teams:
- - go-digest-maintainers
- name: default
+version: 2
+
+requirements:
+ signed_off_by:
+ required: true
+
+always_pending:
+ title_regex: '^WIP'
+ explanation: 'Work in progress...'
+
+group_defaults:
required: 2
+ approve_by_comment:
+ enabled: true
+ approve_regex: '^LGTM'
+ reject_regex: '^Rejected'
+ reset_on_push:
+ enabled: true
+ author_approval:
+ ignored: true
+ conditions:
+ branches:
+ - master
+
+groups:
+ go-digest:
+ teams:
+ - go-digest-maintainers
diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml
index 7ea4ed1d2..5775f885c 100644
--- a/vendor/github.com/opencontainers/go-digest/.travis.yml
+++ b/vendor/github.com/opencontainers/go-digest/.travis.yml
@@ -1,4 +1,5 @@
language: go
go:
- - 1.7
+ - 1.12.x
+ - 1.13.x
- master
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE
index 0ea3ff81e..3ac8ab648 100644
--- a/vendor/github.com/opencontainers/go-digest/LICENSE.code
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE
@@ -176,6 +176,7 @@
END OF TERMS AND CONDITIONS
+ Copyright 2019, 2020 OCI Contributors
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
index 42a29795d..843b1b206 100644
--- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS
+++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
@@ -1,9 +1,5 @@
-Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
-Brandon Philips <brandon.philips@coreos.com> (@philips)
-Brendan Burns <bburns@microsoft.com> (@brendandburns)
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
-Jason Bouzane <jbouzane@google.com> (@jbouzane)
-John Starks <jostarks@microsoft.com> (@jstarks)
-Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
-Stephen Day <stephen.day@docker.com> (@stevvooe)
-Vincent Batts <vbatts@redhat.com> (@vbatts)
+Stephen Day <stevvooe@gmail.com> (@stevvooe)
+Vincent Batts <vbatts@hashbangbash.com> (@vbatts)
+Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> (@AkihiroSuda)
+Sebastiaan van Stijn <github@gone.nl> (@thaJeztah)
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
index 0f5a04092..a11287207 100644
--- a/vendor/github.com/opencontainers/go-digest/README.md
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -8,20 +8,16 @@ Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) fo
# What is a digest?
-A digest is just a hash.
+A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).
-The most common use case for a digest is to create a content
-identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
-systems:
+The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:
```go
id := digest.FromBytes([]byte("my content"))
```
-In the example above, the id can be used to uniquely identify
-the byte slice "my content". This allows two disparate applications
-to agree on a verifiable identifier without having to trust one
-another.
+In the example above, the id can be used to uniquely identify the byte slice "my content".
+This allows two disparate applications to agree on a verifiable identifier without having to trust one another.
An identifying digest can be verified, as follows:
@@ -31,8 +27,7 @@ if id != digest.FromBytes([]byte("my content")) {
}
```
-A `Verifier` type can be used to handle cases where an `io.Reader`
-makes more sense:
+A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
```go
rd := getContent()
@@ -44,33 +39,28 @@ if !verifier.Verified() {
}
```
-Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
-can power a rich, safe, content distribution system.
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
# Usage
-While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
-considered the best resource, a few important items need to be called
-out when using this package.
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
-1. Make sure to import the hash implementations into your application
- or the package will panic. You should have something like the
- following in the main (or other entrypoint) of your application:
+1. Make sure to import the hash implementations into your application or the package will panic.
+ You should have something like the following in the main (or other entrypoint) of your application:
```go
import (
_ "crypto/sha256"
- _ "crypto/sha512"
+ _ "crypto/sha512"
)
```
This may seem inconvenient but it allows you replace the hash
implementations with others, such as https://github.com/stevvooe/resumable.
-2. Even though `digest.Digest` may be assemable as a string, _always_
- verify your input with `digest.Parse` or use `Digest.Validate`
- when accepting untrusted input. While there are measures to
- avoid common problems, this will ensure you have valid digests
- in the rest of your application.
+2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+ While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
# Stability
@@ -80,25 +70,27 @@ As always, before using a package export, read the [godoc](https://godoc.org/git
# Contributing
-This package is considered fairly complete. It has been in production
-in thousands (millions?) of deployments and is fairly battle-hardened.
-New additions will be met with skepticism. If you think there is a
-missing feature, please file a bug clearly describing the problem and
-the alternatives you tried before submitting a PR.
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
-# Reporting security issues
+## Code of Conduct
-Please DO NOT file a public issue, instead send your report privately to
-security@opencontainers.org.
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
-The maintainers take security seriously. If you discover a security issue,
-please bring it to their attention right away!
+## Security
-If you are reporting a security issue, do not create an issue or file a pull
-request on GitHub. Instead, disclose the issue responsibly by sending an email
-to security@opencontainers.org (which is inhabited only by the maintainers of
-the various OCI projects).
+If you find an issue, please follow the [security][security] protocol to report it.
# Copyright and license
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
index 8813bd26f..490951dc3 100644
--- a/vendor/github.com/opencontainers/go-digest/algorithm.go
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
index ad398cba2..518b5e715 100644
--- a/vendor/github.com/opencontainers/go-digest/digest.go
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go
index 36fa2728e..ede907757 100644
--- a/vendor/github.com/opencontainers/go-digest/digester.go
+++ b/vendor/github.com/opencontainers/go-digest/digester.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go
index 491ea1ef1..83d3a936c 100644
--- a/vendor/github.com/opencontainers/go-digest/doc.go
+++ b/vendor/github.com/opencontainers/go-digest/doc.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -29,8 +30,13 @@
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
-// In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest".
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
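The expanded doc comment pins down the digest string format as `<algorithm>:<hex-encoded bytes>`. A small sketch using the package's exported helpers (FromBytes, Parse, Algorithm, Encoded) to illustrate that format and the input validation the README insists on:

```go
// Minimal sketch of the "algorithm:hex" format described above, using
// go-digest's exported API.
package main

import (
	_ "crypto/sha256" // register the hash implementation, as the README advises
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	id := digest.FromBytes([]byte("my content"))
	fmt.Println(id)             // sha256:<hex digest>
	fmt.Println(id.Algorithm()) // sha256
	fmt.Println(id.Encoded())   // the hex-encoded hash bytes

	// Always validate untrusted input before treating it as a digest.
	if _, err := digest.Parse("sha256:not-a-valid-digest"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```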
diff --git a/vendor/github.com/opencontainers/go-digest/go.mod b/vendor/github.com/opencontainers/go-digest/go.mod
new file mode 100644
index 000000000..cf5d7b1d2
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/go.mod
@@ -0,0 +1,3 @@
+module github.com/opencontainers/go-digest
+
+go 1.13
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
index 32125e918..afef506f4 100644
--- a/vendor/github.com/opencontainers/go-digest/verifiers.go
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
index 48e621c99..7b60f8bb3 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -89,6 +89,8 @@ type User struct {
UID uint32 `json:"uid" platform:"linux,solaris"`
// GID is the group id.
GID uint32 `json:"gid" platform:"linux,solaris"`
+ // Umask is the umask for the init process.
+ Umask uint32 `json:"umask,omitempty" platform:"linux,solaris"`
// AdditionalGids are additional group ids set for the container's process.
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
// Username is the user name.
@@ -123,13 +125,26 @@ type Hook struct {
Timeout *int `json:"timeout,omitempty"`
}
+// Hooks specifies a command that is run in the container at a particular event in the lifecycle of a container
// Hooks for container setup and teardown
type Hooks struct {
- // Prestart is a list of hooks to be run before the container process is executed.
+ // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed.
+ // It is called in the Runtime Namespace
Prestart []Hook `json:"prestart,omitempty"`
+ // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
+ // It is called in the Runtime Namespace
+ CreateRuntime []Hook `json:"createRuntime,omitempty"`
+ // CreateContainer is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
+ // It is called in the Container Namespace
+ CreateContainer []Hook `json:"createContainer,omitempty"`
+ // StartContainer is a list of hooks to be run after the start operation is called but before the container process is started
+ // It is called in the Container Namespace
+ StartContainer []Hook `json:"startContainer,omitempty"`
// Poststart is a list of hooks to be run after the container process is started.
+ // It is called in the Runtime Namespace
Poststart []Hook `json:"poststart,omitempty"`
// Poststop is a list of hooks to be run after the container process exits.
+ // It is called in the Runtime Namespace
Poststop []Hook `json:"poststop,omitempty"`
}
@@ -165,6 +180,8 @@ type Linux struct {
// IntelRdt contains Intel Resource Director Technology (RDT) information for
// handling resource constraints (e.g., L3 cache, memory bandwidth) for the container
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
+ // Personality contains configuration for the Linux personality syscall
+ Personality *LinuxPersonality `json:"personality,omitempty"`
}
// LinuxNamespace is the configuration for a Linux namespace
@@ -291,6 +308,8 @@ type LinuxMemory struct {
Swappiness *uint64 `json:"swappiness,omitempty"`
// DisableOOMKiller disables the OOM killer for out of memory conditions
DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
+ // Enables hierarchical memory accounting
+ UseHierarchy *bool `json:"useHierarchy,omitempty"`
}
// LinuxCPU for Linux cgroup 'cpu' resource management
@@ -387,6 +406,28 @@ type LinuxDeviceCgroup struct {
Access string `json:"access,omitempty"`
}
+// LinuxPersonalityDomain refers to a personality domain.
+type LinuxPersonalityDomain string
+
+// LinuxPersonalityFlag refers to an additional personality flag. None are currently defined.
+type LinuxPersonalityFlag string
+
+// Define domain and flags for Personality
+const (
+ // PerLinux is the standard Linux personality
+ PerLinux LinuxPersonalityDomain = "LINUX"
+ // PerLinux32 sets personality to 32 bit
+ PerLinux32 LinuxPersonalityDomain = "LINUX32"
+)
+
+// LinuxPersonality represents the Linux personality syscall input
+type LinuxPersonality struct {
+ // Domain for the personality
+ Domain LinuxPersonalityDomain `json:"domain"`
+ // Additional flags
+ Flags []LinuxPersonalityFlag `json:"flags,omitempty"`
+}
+
// Solaris contains platform-specific configuration for Solaris application containers.
type Solaris struct {
// SMF FMRI which should go "online" before we start the container process.
@@ -556,12 +597,16 @@ type VMImage struct {
type LinuxSeccomp struct {
DefaultAction LinuxSeccompAction `json:"defaultAction"`
Architectures []Arch `json:"architectures,omitempty"`
+ Flags []LinuxSeccompFlag `json:"flags,omitempty"`
Syscalls []LinuxSyscall `json:"syscalls,omitempty"`
}
// Arch used for additional architectures
type Arch string
+// LinuxSeccompFlag is a flag to pass to seccomp(2).
+type LinuxSeccompFlag string
+
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
@@ -595,6 +640,7 @@ const (
ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+ ActLog LinuxSeccompAction = "SCMP_ACT_LOG"
)
// LinuxSeccompOperator used to match syscall arguments in Seccomp
@@ -621,9 +667,10 @@ type LinuxSeccompArg struct {
// LinuxSyscall is used to match a syscall in Seccomp
type LinuxSyscall struct {
- Names []string `json:"names"`
- Action LinuxSeccompAction `json:"action"`
- Args []LinuxSeccompArg `json:"args,omitempty"`
+ Names []string `json:"names"`
+ Action LinuxSeccompAction `json:"action"`
+ ErrnoRet *uint `json:"errnoRet,omitempty"`
+ Args []LinuxSeccompArg `json:"args,omitempty"`
}
// LinuxIntelRdt has container runtime resource constraints for Intel RDT
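The new fields above (per-rule ErrnoRet, the SCMP_ACT_LOG action, seccomp Flags, and LinuxPersonality) are plain additions to the specs-go types, so a config can opt into them directly. A minimal sketch built only from the types shown in this diff:

```go
// Minimal sketch exercising the new specs-go fields added above: a per-rule
// errno override (ErrnoRet), the SCMP_ACT_LOG action, and the Linux
// personality setting. Field names follow the struct definitions in this diff.
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	eperm := uint(1) // EPERM, returned instead of the default errno

	sec := specs.LinuxSeccomp{
		DefaultAction: specs.ActErrno,
		Syscalls: []specs.LinuxSyscall{
			{Names: []string{"keyctl"}, Action: specs.ActErrno, ErrnoRet: &eperm},
			{Names: []string{"mount"}, Action: specs.ActLog}, // log instead of blocking
		},
	}

	linux := specs.Linux{
		Seccomp:     &sec,
		Personality: &specs.LinuxPersonality{Domain: specs.PerLinux32},
	}

	out, _ := json.MarshalIndent(linux, "", "  ")
	fmt.Println(string(out))
}
```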
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
index b920fc1b3..596af0c2f 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 1
+ VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
diff --git a/vendor/github.com/seccomp/containers-golang/.gitignore b/vendor/github.com/seccomp/containers-golang/.gitignore
index 2cad96a16..e433eef88 100644
--- a/vendor/github.com/seccomp/containers-golang/.gitignore
+++ b/vendor/github.com/seccomp/containers-golang/.gitignore
@@ -1,2 +1,2 @@
-default.json
-fixtures
+*.orig
+generate
diff --git a/vendor/github.com/seccomp/containers-golang/LICENSE b/vendor/github.com/seccomp/containers-golang/LICENSE
new file mode 100644
index 000000000..bd465fcf0
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/LICENSE
@@ -0,0 +1,190 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2018-2019 github.com/seccomp authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/seccomp/containers-golang/Makefile b/vendor/github.com/seccomp/containers-golang/Makefile
index 88569e87b..2d91917f9 100644
--- a/vendor/github.com/seccomp/containers-golang/Makefile
+++ b/vendor/github.com/seccomp/containers-golang/Makefile
@@ -1,3 +1,5 @@
+export GO111MODULE=off
+
TAGS ?= seccomp
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
GO := go
@@ -5,14 +7,26 @@ PACKAGE := github.com/seccomp/containers-golang
sources := $(wildcard *.go)
-default.json: $(sources)
+.PHONY: seccomp.json
+seccomp.json: $(sources)
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/generate.go
$(GO) build -compiler gc ./cmd/generate.go
$(GO) run ${BUILDFLAGS} cmd/generate.go
-all: default.json
+all: seccomp.json
.PHONY: test-unit
test-unit:
- $(GO) test $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
- $(GO) test $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ $(GO) test -v $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ $(GO) test -v $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+
+.PHONY: vendor
+vendor:
+ export GO111MODULE=on \
+ $(GO) mod tidy && \
+ $(GO) mod vendor && \
+ $(GO) mod verify
+
+.PHONY: clean
+clean:
+ rm -f generate
diff --git a/vendor/github.com/seccomp/containers-golang/README.md b/vendor/github.com/seccomp/containers-golang/README.md
index 1012baec3..a44238432 100644
--- a/vendor/github.com/seccomp/containers-golang/README.md
+++ b/vendor/github.com/seccomp/containers-golang/README.md
@@ -1,9 +1,13 @@
+# containers-golang
+
+[![CircleCI](https://circleci.com/gh/seccomp/containers-golang.svg?style=shield)](https://circleci.com/gh/seccomp/containers-golang)
+
`containers-golang` is a set of Go libraries used by container runtimes to generate and load seccomp mappings into the kernel.
seccomp (short for secure computing mode) is a BPF based syscall filter language and present a more conventional function-call based filtering interface that should be familiar to, and easily adopted by, application developers.
## Building
- make - Generates default.json file, which containes the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to controll the syscalls available to containers.
+ make - Generates seccomp.json file, which contains the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to controll the syscalls available to containers.
### Supported build tags
@@ -13,13 +17,9 @@ seccomp (short for secure computing mode) is a BPF based syscall filter language
When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
-## License
-
-ASL 2.0
-
## Contact
-- IRC: #[CRI-O](irc://irc.freenode.net:6667/#cri-o) on freenode.net
+- IRC: #[containers](irc://irc.freenode.net:6667/#containers) on freenode.net
[cri-o]: https://github.com/kubernetes-incubator/cri-o/pulls
[buildah]: https://github.com/projectatomic/buildah
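The README change renames the generated profile from default.json to seccomp.json; runtimes then turn that whitelist into the LinuxSeccomp section of an OCI config. A hedged sketch of typical consumption, assuming the package's GetDefaultProfile helper (not part of this diff) and a Linux build with the seccomp tag:

```go
// Hedged sketch of how a runtime consumes this library. GetDefaultProfile is
// an assumption here (it is not shown in this diff): it filters the default
// whitelist against the capabilities in the supplied spec and returns the
// specs.LinuxSeccomp block to embed in the container's OCI config.
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	seccomp "github.com/seccomp/containers-golang"
)

func main() {
	spec := &specs.Spec{
		Process: &specs.Process{Capabilities: &specs.LinuxCapabilities{}},
	}

	profile, err := seccomp.GetDefaultProfile(spec)
	if err != nil {
		panic(err)
	}

	out, _ := json.Marshal(profile)
	fmt.Printf("default action %s, %d syscall rules (%d bytes of JSON)\n",
		profile.DefaultAction, len(profile.Syscalls), len(out))
}
```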
diff --git a/vendor/github.com/seccomp/containers-golang/go.mod b/vendor/github.com/seccomp/containers-golang/go.mod
new file mode 100644
index 000000000..2b56d46fd
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/go.mod
@@ -0,0 +1,16 @@
+module github.com/seccomp/containers-golang
+
+go 1.13
+
+require (
+ github.com/blang/semver v3.5.1+incompatible // indirect
+ github.com/hashicorp/go-multierror v1.0.0 // indirect
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+ github.com/opencontainers/runtime-tools v0.9.0
+ github.com/opencontainers/selinux v1.3.0 // indirect
+ github.com/seccomp/libseccomp-golang v0.9.1
+ github.com/sirupsen/logrus v1.4.2 // indirect
+ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
+ github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+ golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc
+)
diff --git a/vendor/github.com/seccomp/containers-golang/go.sum b/vendor/github.com/seccomp/containers-golang/go.sum
new file mode 100644
index 000000000..ba00acd09
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/go.sum
@@ -0,0 +1,48 @@
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777 h1:7CkKaORyxoXsM8z56r+M0wf3uCpVGVqx4CWq7oJ/4DY=
+github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
+github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
+github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
+github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc h1:EinpED/Eb9JUgDi6pkoFjw+tz69c3lHUZr2+Va84S0w=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json
index fe9eda56a..4c84d981f 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp.json
+++ b/vendor/github.com/seccomp/containers-golang/seccomp.json
@@ -52,6 +52,8 @@
"syscalls": [
{
"names": [
+ "_llseek",
+ "_newselect",
"accept",
"accept4",
"access",
@@ -118,6 +120,8 @@
"ftruncate64",
"futex",
"futimesat",
+ "get_robust_list",
+ "get_thread_area",
"getcpu",
"getcwd",
"getdents",
@@ -143,12 +147,10 @@
"getresuid",
"getresuid32",
"getrlimit",
- "get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
- "get_thread_area",
"gettid",
"gettimeofday",
"getuid",
@@ -159,13 +161,13 @@
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
- "ioctl",
"io_destroy",
"io_getevents",
- "ioprio_get",
- "ioprio_set",
"io_setup",
"io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
"ipc",
"kill",
"lchown",
@@ -176,7 +178,6 @@
"listen",
"listxattr",
"llistxattr",
- "_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
@@ -194,6 +195,7 @@
"mlockall",
"mmap",
"mmap2",
+ "mount",
"mprotect",
"mq_getsetattr",
"mq_notify",
@@ -210,9 +212,9 @@
"munlock",
"munlockall",
"munmap",
+ "name_to_handle_at",
"nanosleep",
"newfstatat",
- "_newselect",
"open",
"openat",
"pause",
@@ -234,6 +236,7 @@
"readlink",
"readlinkat",
"readv",
+ "reboot",
"recv",
"recvfrom",
"recvmmsg",
@@ -253,11 +256,11 @@
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
- "sched_get_priority_max",
- "sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
@@ -277,6 +280,9 @@
"sendmmsg",
"sendmsg",
"sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
"setfsgid",
"setfsgid32",
"setfsuid",
@@ -297,11 +303,8 @@
"setreuid",
"setreuid32",
"setrlimit",
- "set_robust_list",
"setsid",
"setsockopt",
- "set_thread_area",
- "set_tid_address",
"setuid",
"setuid32",
"setxattr",
@@ -335,21 +338,24 @@
"time",
"timer_create",
"timer_delete",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
+ "umount",
+ "umount2",
"uname",
"unlink",
"unlinkat",
+ "unshare",
"utime",
"utimensat",
"utimes",
@@ -359,12 +365,7 @@
"waitid",
"waitpid",
"write",
- "writev",
- "mount",
- "umount2",
- "reboot",
- "name_to_handle_at",
- "unshare"
+ "writev"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
@@ -770,4 +771,4 @@
"excludes": {}
}
]
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
index fde3cff75..e137a5887 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
@@ -1,5 +1,9 @@
// +build seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -44,6 +48,8 @@ func DefaultProfile() *Seccomp {
syscalls := []*Syscall{
{
Names: []string{
+ "_llseek",
+ "_newselect",
"accept",
"accept4",
"access",
@@ -110,6 +116,8 @@ func DefaultProfile() *Seccomp {
"ftruncate64",
"futex",
"futimesat",
+ "get_robust_list",
+ "get_thread_area",
"getcpu",
"getcwd",
"getdents",
@@ -135,12 +143,10 @@ func DefaultProfile() *Seccomp {
"getresuid",
"getresuid32",
"getrlimit",
- "get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
- "get_thread_area",
"gettid",
"gettimeofday",
"getuid",
@@ -151,13 +157,13 @@ func DefaultProfile() *Seccomp {
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
- "ioctl",
"io_destroy",
"io_getevents",
- "ioprio_get",
- "ioprio_set",
"io_setup",
"io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
"ipc",
"kill",
"lchown",
@@ -168,7 +174,6 @@ func DefaultProfile() *Seccomp {
"listen",
"listxattr",
"llistxattr",
- "_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
@@ -206,7 +211,6 @@ func DefaultProfile() *Seccomp {
"name_to_handle_at",
"nanosleep",
"newfstatat",
- "_newselect",
"open",
"openat",
"pause",
@@ -248,11 +252,11 @@ func DefaultProfile() *Seccomp {
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
- "sched_get_priority_max",
- "sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
@@ -272,6 +276,9 @@ func DefaultProfile() *Seccomp {
"sendmmsg",
"sendmsg",
"sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
"setfsgid",
"setfsgid32",
"setfsuid",
@@ -292,11 +299,8 @@ func DefaultProfile() *Seccomp {
"setreuid",
"setreuid32",
"setrlimit",
- "set_robust_list",
"setsid",
"setsockopt",
- "set_thread_area",
- "set_tid_address",
"setuid",
"setuid32",
"setxattr",
@@ -330,12 +334,12 @@ func DefaultProfile() *Seccomp {
"time",
"timer_create",
"timer_delete",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
"times",
"tkill",
"truncate",
@@ -343,9 +347,11 @@ func DefaultProfile() *Seccomp {
"ugetrlimit",
"umask",
"umount",
+ "umount2",
"uname",
"unlink",
"unlinkat",
+ "unshare",
"utime",
"utimensat",
"utimes",
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
index 9a495e3e2..44dcd90b8 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
@@ -1,5 +1,9 @@
// +build seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -9,6 +13,7 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
libseccomp "github.com/seccomp/libseccomp-golang"
+ "golang.org/x/sys/unix"
)
//go:generate go run -tags 'seccomp' generate.go
@@ -22,11 +27,25 @@ func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
var config Seccomp
if err := json.Unmarshal([]byte(body), &config); err != nil {
- return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err)
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
}
return setupSeccomp(&config, rs)
}
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ config := &Seccomp{}
+ if err := json.Unmarshal(body, config); err != nil {
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
+ }
+ return setupSeccomp(config, rs)
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return setupSeccomp(config, specgen)
+}
+
var nativeToSeccomp = map[string]Arch{
"amd64": ArchX86_64,
"arm64": ArchAARCH64,
@@ -127,21 +146,22 @@ Loop:
}
if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args))
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet))
}
- for _, n := range call.Names {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args))
+ if len(call.Names) > 0 {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet))
}
}
return newConfig, nil
}
-func createSpecsSyscall(name string, action Action, args []*Arg) specs.LinuxSyscall {
+func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
newCall := specs.LinuxSyscall{
- Names: []string{name},
- Action: specs.LinuxSeccompAction(action),
+ Names: names,
+ Action: specs.LinuxSeccompAction(action),
+ ErrnoRet: errnoRet,
}
// Loop through all the arguments of the syscall and convert them
@@ -157,3 +177,15 @@ func createSpecsSyscall(name string, action Action, args []*Arg) specs.LinuxSysc
}
return newCall
}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ return true
+ }
+ }
+ return false
+}
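The hunk above exports three new helpers from github.com/seccomp/containers-golang: LoadProfileFromBytes, LoadProfileFromConfig, and IsEnabled. The following is a minimal sketch of how a caller might combine them; the profile path handling and the way the runtime spec is obtained are illustrative assumptions, not podman's actual wiring.

package seccomputil

import (
	"fmt"
	"io/ioutil"

	"github.com/opencontainers/runtime-spec/specs-go"
	seccomp "github.com/seccomp/containers-golang"
)

// applySeccomp fills spec.Linux.Seccomp from either the default profile or a
// profile file. Sketch only; error wording and fallbacks are assumptions.
func applySeccomp(spec *specs.Spec, profilePath string) error {
	if !seccomp.IsEnabled() {
		// Kernel lacks CONFIG_SECCOMP / CONFIG_SECCOMP_FILTER; skip filtering.
		return nil
	}
	if profilePath == "" {
		sc, err := seccomp.GetDefaultProfile(spec)
		if err != nil {
			return err
		}
		spec.Linux.Seccomp = sc
		return nil
	}
	body, err := ioutil.ReadFile(profilePath)
	if err != nil {
		return fmt.Errorf("reading seccomp profile: %w", err)
	}
	sc, err := seccomp.LoadProfileFromBytes(body, spec)
	if err != nil {
		return err
	}
	spec.Linux.Seccomp = sc
	return nil
}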
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
index 279340426..936a9a641 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
@@ -1,5 +1,9 @@
// +build !seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -22,3 +26,18 @@ func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
return nil, fmt.Errorf("Seccomp not supported on this platform")
}
+
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, fmt.Errorf("Seccomp not supported on this platform")
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, fmt.Errorf("Seccomp not supported on this platform")
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ return false
+}
diff --git a/vendor/github.com/seccomp/containers-golang/types.go b/vendor/github.com/seccomp/containers-golang/types.go
index b549a55fe..6651c423f 100644
--- a/vendor/github.com/seccomp/containers-golang/types.go
+++ b/vendor/github.com/seccomp/containers-golang/types.go
@@ -1,5 +1,9 @@
package seccomp // import "github.com/seccomp/containers-golang"
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
@@ -90,4 +94,5 @@ type Syscall struct {
Comment string `json:"comment"`
Includes Filter `json:"includes"`
Excludes Filter `json:"excludes"`
+ ErrnoRet *uint `json:"errnoRet,omitempty"`
}
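The new ErrnoRet field lets a profile return a specific errno for a syscall instead of the default action. A hedged sketch of constructing such an entry follows; the action string and errno value are illustrative, and the package's own Action constants should be preferred where available.

package seccomputil

import seccomp "github.com/seccomp/containers-golang"

// denyKexecWithErrno builds a rule that fails kexec syscalls with ENOSYS.
// The numeric errno and action name are example values.
func denyKexecWithErrno() *seccomp.Syscall {
	errno := uint(38) // ENOSYS on Linux, used here as an example
	return &seccomp.Syscall{
		Names:    []string{"kexec_load", "kexec_file_load"},
		Action:   seccomp.Action("SCMP_ACT_ERRNO"),
		Args:     []*seccomp.Arg{},
		ErrnoRet: &errno,
	}
}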
diff --git a/vendor/github.com/seccomp/containers-golang/vendor.conf b/vendor/github.com/seccomp/containers-golang/vendor.conf
deleted file mode 100644
index 6111c475b..000000000
--- a/vendor/github.com/seccomp/containers-golang/vendor.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-github.com/opencontainers/runtime-tools master
-github.com/blang/semver master
-github.com/hashicorp/go-multierror master
-github.com/hashicorp/errwrap master
-github.com/syndtr/gocapability master
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar.go b/vendor/github.com/vbauerster/mpb/v5/bar.go
index 1a4c66fe1..13bda2247 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar.go
@@ -69,6 +69,7 @@ type bState struct {
trimSpace bool
toComplete bool
completeFlushed bool
+ ignoreComplete bool
noPop bool
aDecorators []decor.Decorator
pDecorators []decor.Decorator
@@ -170,17 +171,18 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
}
// SetTotal sets total dynamically.
-// If total is less or equal to zero it takes progress' current value.
-// If complete is true, complete event will be triggered.
+// If total is less than or equal to zero it takes progress' current value.
+// The complete flag enables or disables the complete event on `current >= total`.
func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
+ s.ignoreComplete = !complete
if total <= 0 {
s.total = s.current
} else {
s.total = total
}
- if complete && !s.toComplete {
+ if !s.ignoreComplete && !s.toComplete {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
@@ -197,7 +199,7 @@ func (b *Bar) SetCurrent(current int64) {
s.iterated = true
s.lastN = current - s.current
s.current = current
- if s.total > 0 && s.current >= s.total {
+ if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
@@ -224,7 +226,7 @@ func (b *Bar) IncrInt64(n int64) {
s.iterated = true
s.lastN = n
s.current += n
- if s.total > 0 && s.current >= s.total {
+ if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
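With the ignoreComplete flag above, SetTotal(total, false) keeps a bar from completing even after current reaches total, which suits transfers whose size is only known or revised mid-flight. A minimal usage sketch, with bar options chosen purely for illustration rather than taken from podman:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(0)
	// Provisional total; complete=false defers the complete event.
	bar.SetTotal(100, false)

	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(5 * time.Millisecond)
	}

	// The real size is now known; mark the bar complete.
	bar.SetTotal(100, true)
	p.Wait()
}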
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
index 00bf0a494..33dbf191d 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
@@ -76,7 +76,7 @@ func (s *barFiller) SetReverse(reverse bool) {
s.flush = reverseFlush
} else {
s.tip = s.format[rTip]
- s.flush = normalFlush
+ s.flush = regularFlush
}
s.reverse = reverse
}
@@ -125,7 +125,7 @@ func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
s.flush(w, bb)
}
-func normalFlush(w io.Writer, bb [][]byte) {
+func regularFlush(w io.Writer, bb [][]byte) {
for i := 0; i < len(bb); i++ {
w.Write(bb[i])
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod
index 672191fc8..1d8d52934 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v5/go.mod
@@ -3,8 +3,8 @@ module github.com/vbauerster/mpb/v5
require (
github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
- golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4
- golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect
+ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum
index 9a411976a..99ca1bf67 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v5/go.sum
@@ -3,11 +3,11 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
index 7e261cb22..e321e0a6b 100644
--- a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
@@ -7,6 +7,9 @@ func Percentage(total, current int64, width int) float64 {
if total <= 0 {
return 0
}
+ if current >= total {
+ return float64(width)
+ }
return float64(int64(width)*current) / float64(total)
}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
index 7c498e90d..a2ecf5c32 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
@@ -42,10 +42,14 @@ type Cipher struct {
// The last len bytes of buf are leftover key stream bytes from the previous
// XORKeyStream invocation. The size of buf depends on how many blocks are
- // computed at a time.
+ // computed at a time by xorKeyStreamBlocks.
buf [bufSize]byte
len int
+ // overflow is set when the counter overflowed, no more blocks can be
+ // generated, and the next XORKeyStream call should panic.
+ overflow bool
+
// The counter-independent results of the first round are cached after they
// are computed the first time.
precompDone bool
@@ -89,6 +93,7 @@ func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
return nil, errors.New("chacha20: wrong nonce size")
}
+ key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint
c.key = [8]uint32{
binary.LittleEndian.Uint32(key[0:4]),
binary.LittleEndian.Uint32(key[4:8]),
@@ -139,15 +144,18 @@ func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
// behave as if (64 * counter) bytes had been encrypted so far.
//
-// To prevent accidental counter reuse, SetCounter panics if counter is
-// less than the current value.
+// To prevent accidental counter reuse, SetCounter panics if counter is less
+// than the current value.
+//
+// Note that the execution time of XORKeyStream is not independent of the
+// counter value.
func (s *Cipher) SetCounter(counter uint32) {
// Internally, s may buffer multiple blocks, which complicates this
// implementation slightly. When checking whether the counter has rolled
// back, we must use both s.counter and s.len to determine how many blocks
// we have already output.
outputCounter := s.counter - uint32(s.len)/blockSize
- if counter < outputCounter {
+ if s.overflow || counter < outputCounter {
panic("chacha20: SetCounter attempted to rollback counter")
}
@@ -196,34 +204,52 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
dst[i] = src[i] ^ b
}
s.len -= len(keyStream)
- src = src[len(keyStream):]
- dst = dst[len(keyStream):]
+ dst, src = dst[len(keyStream):], src[len(keyStream):]
+ }
+ if len(src) == 0 {
+ return
}
- const blocksPerBuf = bufSize / blockSize
- numBufs := (uint64(len(src)) + bufSize - 1) / bufSize
- if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 {
+ // If we'd need to let the counter overflow and keep generating output,
+ // panic immediately. If instead we'd only reach the last block, remember
+ // not to generate any more output after the buffer is drained.
+ numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize
+ if s.overflow || uint64(s.counter)+numBlocks > 1<<32 {
panic("chacha20: counter overflow")
+ } else if uint64(s.counter)+numBlocks == 1<<32 {
+ s.overflow = true
}
// xorKeyStreamBlocks implementations expect input lengths that are a
// multiple of bufSize. Platform-specific ones process multiple blocks at a
// time, so have bufSizes that are a multiple of blockSize.
- rem := len(src) % bufSize
- full := len(src) - rem
-
+ full := len(src) - len(src)%bufSize
if full > 0 {
s.xorKeyStreamBlocks(dst[:full], src[:full])
}
+ dst, src = dst[full:], src[full:]
+
+ // If using a multi-block xorKeyStreamBlocks would overflow, use the generic
+ // one that does one block at a time.
+ const blocksPerBuf = bufSize / blockSize
+ if uint64(s.counter)+blocksPerBuf > 1<<32 {
+ s.buf = [bufSize]byte{}
+ numBlocks := (len(src) + blockSize - 1) / blockSize
+ buf := s.buf[bufSize-numBlocks*blockSize:]
+ copy(buf, src)
+ s.xorKeyStreamBlocksGeneric(buf, buf)
+ s.len = len(buf) - copy(dst, buf)
+ return
+ }
// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
// keep the leftover keystream for the next XORKeyStream invocation.
- if rem > 0 {
+ if len(src) > 0 {
s.buf = [bufSize]byte{}
- copy(s.buf[:], src[full:])
+ copy(s.buf[:], src)
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
- s.len = bufSize - copy(dst[full:], s.buf[:])
+ s.len = bufSize - copy(dst, s.buf[:])
}
}
@@ -260,7 +286,9 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
s.precompDone = true
}
- for i := 0; i < len(src); i += blockSize {
+ // A condition of len(src) > 0 would be sufficient, but this also
+ // acts as a bounds check elimination hint.
+ for len(src) >= 64 && len(dst) >= 64 {
// The remainder of the first column round.
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
@@ -285,49 +313,28 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
- // Finally, add back the initial state to generate the key stream.
- x0 += c0
- x1 += c1
- x2 += c2
- x3 += c3
- x4 += c4
- x5 += c5
- x6 += c6
- x7 += c7
- x8 += c8
- x9 += c9
- x10 += c10
- x11 += c11
- x12 += s.counter
- x13 += c13
- x14 += c14
- x15 += c15
+ // Add back the initial state to generate the key stream, then
+ // XOR the key stream with the source and write out the result.
+ addXor(dst[0:4], src[0:4], x0, c0)
+ addXor(dst[4:8], src[4:8], x1, c1)
+ addXor(dst[8:12], src[8:12], x2, c2)
+ addXor(dst[12:16], src[12:16], x3, c3)
+ addXor(dst[16:20], src[16:20], x4, c4)
+ addXor(dst[20:24], src[20:24], x5, c5)
+ addXor(dst[24:28], src[24:28], x6, c6)
+ addXor(dst[28:32], src[28:32], x7, c7)
+ addXor(dst[32:36], src[32:36], x8, c8)
+ addXor(dst[36:40], src[36:40], x9, c9)
+ addXor(dst[40:44], src[40:44], x10, c10)
+ addXor(dst[44:48], src[44:48], x11, c11)
+ addXor(dst[48:52], src[48:52], x12, s.counter)
+ addXor(dst[52:56], src[52:56], x13, c13)
+ addXor(dst[56:60], src[56:60], x14, c14)
+ addXor(dst[60:64], src[60:64], x15, c15)
s.counter += 1
- if s.counter == 0 {
- panic("chacha20: internal error: counter overflow")
- }
- in, out := src[i:], dst[i:]
- in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint
-
- // XOR the key stream with the source and write out the result.
- xor(out[0:], in[0:], x0)
- xor(out[4:], in[4:], x1)
- xor(out[8:], in[8:], x2)
- xor(out[12:], in[12:], x3)
- xor(out[16:], in[16:], x4)
- xor(out[20:], in[20:], x5)
- xor(out[24:], in[24:], x6)
- xor(out[28:], in[28:], x7)
- xor(out[32:], in[32:], x8)
- xor(out[36:], in[36:], x9)
- xor(out[40:], in[40:], x10)
- xor(out[44:], in[44:], x11)
- xor(out[48:], in[48:], x12)
- xor(out[52:], in[52:], x13)
- xor(out[56:], in[56:], x14)
- xor(out[60:], in[60:], x15)
+ src, dst = src[blockSize:], dst[blockSize:]
}
}
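The overflow bookkeeping above makes the cipher panic once the 32-bit block counter would wrap, rather than silently reusing keystream. A short sketch of the exported chacha20 API it protects; the all-zero key and nonce are placeholders for the demo only.

package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)     // 32 bytes, zero for the demo
	nonce := make([]byte, chacha20.NonceSize) // 12 bytes, zero for the demo

	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("hello, keystream")
	ciphertext := make([]byte, len(plaintext))
	c.XORKeyStream(ciphertext, plaintext)
	fmt.Printf("%x\n", ciphertext)

	// SetCounter panics on rollback, and XORKeyStream now also panics once
	// the counter would overflow, instead of reusing keystream.
}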
diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go
index 0110c9865..c2d04851e 100644
--- a/vendor/golang.org/x/crypto/chacha20/xor.go
+++ b/vendor/golang.org/x/crypto/chacha20/xor.go
@@ -13,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" ||
runtime.GOARCH == "ppc64le" ||
runtime.GOARCH == "s390x"
-// xor reads a little endian uint32 from src, XORs it with u and
+// addXor reads a little endian uint32 from src, XORs it with (a + b) and
// places the result in little endian byte order in dst.
-func xor(dst, src []byte, u uint32) {
- _, _ = src[3], dst[3] // eliminate bounds checks
+func addXor(dst, src []byte, a, b uint32) {
+ _, _ = src[3], dst[3] // bounds check elimination hint
if unaligned {
// The compiler should optimize this code into
// 32-bit unaligned little endian loads and stores.
@@ -27,15 +27,16 @@ func xor(dst, src []byte, u uint32) {
v |= uint32(src[1]) << 8
v |= uint32(src[2]) << 16
v |= uint32(src[3]) << 24
- v ^= u
+ v ^= a + b
dst[0] = byte(v)
dst[1] = byte(v >> 8)
dst[2] = byte(v >> 16)
dst[3] = byte(v >> 24)
} else {
- dst[0] = src[0] ^ byte(u)
- dst[1] = src[1] ^ byte(u>>8)
- dst[2] = src[2] ^ byte(u>>16)
- dst[3] = src[3] ^ byte(u>>24)
+ a += b
+ dst[0] = src[0] ^ byte(a)
+ dst[1] = src[1] ^ byte(a>>8)
+ dst[2] = src[2] ^ byte(a>>16)
+ dst[3] = src[3] ^ byte(a>>24)
}
}
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
index b0c2cd056..347c8b15f 100644
--- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
+++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
@@ -7,5 +7,3 @@
package poly1305
type mac struct{ macGeneric }
-
-func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
index 066159b79..3c75c2a67 100644
--- a/vendor/golang.org/x/crypto/poly1305/poly1305.go
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -46,10 +46,9 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
// two different messages with the same key allows an attacker
// to forge messages at will.
func New(key *[32]byte) *MAC {
- return &MAC{
- mac: newMAC(key),
- finalized: false,
- }
+ m := &MAC{}
+ initialize(key, &m.macState)
+ return m
}
// MAC is an io.Writer computing an authentication tag
@@ -58,7 +57,7 @@ func New(key *[32]byte) *MAC {
// MAC cannot be used like common hash.Hash implementations,
// because using a poly1305 key twice breaks its security.
// Therefore writing data to a running MAC after calling
-// Sum causes it to panic.
+// Sum or Verify causes it to panic.
type MAC struct {
mac // platform-dependent implementation
@@ -71,10 +70,10 @@ func (h *MAC) Size() int { return TagSize }
// Write adds more data to the running message authentication code.
// It never returns an error.
//
-// It must not be called after the first call of Sum.
+// It must not be called after the first call of Sum or Verify.
func (h *MAC) Write(p []byte) (n int, err error) {
if h.finalized {
- panic("poly1305: write to MAC after Sum")
+ panic("poly1305: write to MAC after Sum or Verify")
}
return h.mac.Write(p)
}
@@ -87,3 +86,12 @@ func (h *MAC) Sum(b []byte) []byte {
h.finalized = true
return append(b, mac[:]...)
}
+
+// Verify returns whether the authenticator of all data written to
+// the message authentication code matches the expected value.
+func (h *MAC) Verify(expected []byte) bool {
+ var mac [TagSize]byte
+ h.mac.Sum(&mac)
+ h.finalized = true
+ return subtle.ConstantTimeCompare(expected, mac[:]) == 1
+}
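The new MAC.Verify finalizes the MAC and compares tags in constant time. A hedged sketch of computing and checking a tag; the zero key is a throwaway placeholder, and poly1305 keys must never be reused across messages.

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte // in real use, a fresh one-time key per message

	mac := poly1305.New(&key)
	mac.Write([]byte("attested payload"))
	tag := mac.Sum(nil)

	verifier := poly1305.New(&key)
	verifier.Write([]byte("attested payload"))
	fmt.Println("tag valid:", verifier.Verify(tag)) // constant-time comparison
}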
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
index 35b9e38c9..99e5a1d50 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
@@ -9,17 +9,6 @@ package poly1305
//go:noescape
func update(state *macState, msg []byte)
-func sum(out *[16]byte, m []byte, key *[32]byte) {
- h := newMAC(key)
- h.Write(m)
- h.Sum(out)
-}
-
-func newMAC(key *[32]byte) (h mac) {
- initialize(key, &h.r, &h.s)
- return
-}
-
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
index 1187eab78..c77ff179d 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
@@ -31,9 +31,10 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
h.Sum(out)
}
-func newMACGeneric(key *[32]byte) (h macGeneric) {
- initialize(key, &h.r, &h.s)
- return
+func newMACGeneric(key *[32]byte) macGeneric {
+ m := macGeneric{}
+ initialize(key, &m.macState)
+ return m
}
// macState holds numbers in saturated 64-bit little-endian limbs. That is,
@@ -97,11 +98,12 @@ const (
rMask1 = 0x0FFFFFFC0FFFFFFC
)
-func initialize(key *[32]byte, r, s *[2]uint64) {
- r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
- r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
- s[0] = binary.LittleEndian.Uint64(key[16:24])
- s[1] = binary.LittleEndian.Uint64(key[24:32])
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+ m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+ m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+ m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+ m.s[1] = binary.LittleEndian.Uint64(key[24:32])
}
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
index 2e3ae34c7..2b55a29c5 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
@@ -2,12 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo purego
+// At this point only s390x has an assembly implementation of sum. All other
+// platforms have assembly implementations of mac, and just define sum as using
+// that through New. Once s390x is ported, this file can be deleted and the body
+// of sum moved into Sum.
+
+// +build !go1.11 !s390x gccgo purego
package poly1305
func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
- h := newMAC(key)
+ h := New(key)
h.Write(msg)
- h.Sum(out)
+ h.Sum(out[:0])
}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
index 92597bb8c..2e7a120b1 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
@@ -9,17 +9,6 @@ package poly1305
//go:noescape
func update(state *macState, msg []byte)
-func sum(out *[16]byte, m []byte, key *[32]byte) {
- h := newMAC(key)
- h.Write(m)
- h.Sum(out)
-}
-
-func newMAC(key *[32]byte) (h mac) {
- initialize(key, &h.r, &h.s)
- return
-}
-
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index b0204ee59..8bd6b3daf 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -119,7 +119,7 @@ var cipherModes = map[string]*cipherMode{
chacha20Poly1305ID: {64, 0, newChaCha20Cipher},
// CBC mode is insecure and so is not included in the default config.
- // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+ // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index d1b4fca3a..2ffb97bfb 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
}
const (
+ keyCtrlC = 3
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
@@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
switch b[0] {
case 1: // ^A
return keyHome, b[1:]
+ case 2: // ^B
+ return keyLeft, b[1:]
case 5: // ^E
return keyEnd, b[1:]
+ case 6: // ^F
+ return keyRight, b[1:]
case 8: // ^H
return keyBackspace, b[1:]
case 11: // ^K
@@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) {
return "", io.EOF
}
}
+ if key == keyCtrlC {
+ return "", io.EOF
+ }
if key == keyPasteStart {
t.pasteActive = true
if len(t.line) == 0 {
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index f4d9b5ece..3a67636fe 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
+ _ incomparable
p *clientConnPool
done chan struct{} // closed when done
res *ClientConn // valid after done is closed
@@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn)
}
type addConnCall struct {
+ _ incomparable
p *clientConnPool
done chan struct{} // closed when done
err error
@@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
close(c.done)
}
-func (p *clientConnPool) addConn(key string, cc *ClientConn) {
- p.mu.Lock()
- p.addConnLocked(key, cc)
- p.mu.Unlock()
-}
-
// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
for _, v := range p.conns[key] {
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
index cea601fcd..b51f0e0cf 100644
--- a/vendor/golang.org/x/net/http2/flow.go
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -8,6 +8,8 @@ package http2
// flow is the flow control window's size.
type flow struct {
+ _ incomparable
+
// n is the number of DATA bytes we're allowed to send.
// A flow is kept both on a conn and a per-stream.
n int32
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
index b412a96c5..a1ab2f056 100644
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
return nil
}
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
type node struct {
+ _ incomparable
+
// children is non-nil for internal nodes
children *[256]*node
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 27cc893cc..5571ccfd2 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -241,6 +241,7 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
+ _ incomparable
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
@@ -313,6 +314,7 @@ func bodyAllowedForStatus(status int) bool {
}
type httpError struct {
+ _ incomparable
msg string
timeout bool
}
@@ -376,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) {
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
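The incomparable trick added throughout these http2 files relies on a zero-length array of func(): it adds no size when placed first, but makes the enclosing struct non-comparable so accidental == comparisons fail at compile time. A small illustration, with the struct name chosen only for the example:

package main

type incomparable [0]func()

type connState struct {
	_  incomparable
	id int
}

func main() {
	a := connState{id: 1}
	b := connState{id: 1}
	_ = a
	_ = b
	// _ = a == b // compile error: struct containing [0]func() cannot be compared
}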
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index bc9e41a1b..345b7cd85 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -761,6 +761,7 @@ func (sc *serverConn) readFrames() {
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
+ _ incomparable
wr FrameWriteRequest // what was written (or attempted)
err error // result of the writeFrame call
}
@@ -771,7 +772,7 @@ type frameWriteResult struct {
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
err := wr.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wr, err}
+ sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
}
func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -1161,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
if wr.write.staysWithinBuffer(sc.bw.Available()) {
sc.writingFrameAsync = false
err := wr.write.writeFrame(sc)
- sc.wroteFrame(frameWriteResult{wr, err})
+ sc.wroteFrame(frameWriteResult{wr: wr, err: err})
} else {
sc.writingFrameAsync = true
go sc.writeFrameAsync(wr)
@@ -2057,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var trailer http.Header
for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ key = http.CanonicalHeaderKey(textproto.TrimString(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
@@ -2275,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type requestBody struct {
+ _ incomparable
stream *stream
conn *serverConn
closed bool // for use by Close only
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index e4fb02530..54acc1e36 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -916,7 +916,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
k = http.CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", &badStringError{"invalid Trailer key", k}
+ return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
@@ -1394,13 +1394,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@@ -1616,6 +1609,7 @@ func (cc *ClientConn) writeHeader(name, value string) {
}
type resAndError struct {
+ _ incomparable
res *http.Response
err error
}
@@ -1663,6 +1657,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
+ _ incomparable
cc *ClientConn
closeWhenIdle bool
}
@@ -2479,6 +2474,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
+ _ incomparable
body io.ReadCloser // underlying Response.Body
zr *gzip.Reader // lazily-initialized gzip reader
zerr error // sticky error
diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go
index be6027224..da2989668 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix,ppc64
+// +build aix
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
new file mode 100644
index 000000000..76fbe40b7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Recreate a getsystemcfg syscall handler instead of
+// using the one provided by x/sys/unix to avoid having
+// the dependency between them. (See golang.org/issue/32102)
+// Moreover, this file will be used during the building of
+// gccgo's libgo and thus must not use a CGo method.
+
+// +build aix
+// +build gccgo
+
+package cpu
+
+import (
+ "syscall"
+)
+
+//extern getsystemcfg
+func gccgoGetsystemcfg(label uint32) (r uint64)
+
+func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
+ r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
+ e1 = syscall.GetErrno()
+ return
+}
diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
new file mode 100644
index 000000000..e07899b90
--- /dev/null
+++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unsafeheader contains header declarations for the Go runtime's
+// slice and string implementations.
+//
+// This package allows x/sys to use types equivalent to
+// reflect.SliceHeader and reflect.StringHeader without introducing
+// a dependency on the (relatively heavy) "reflect" package.
+package unsafeheader
+
+import (
+ "unsafe"
+)
+
+// Slice is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may change in a later release.
+type Slice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+// String is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may change in a later release.
+type String struct {
+ Data unsafe.Pointer
+ Len int
+}
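The new unsafeheader package gives x/sys a reflect-free slice header, the same pattern the Getdirentries change further down uses to view kernel-filled memory as a []byte. Because the package is internal to x/sys, the sketch below declares an equivalent header locally; its layout mirrors the vendored type above.

package main

import (
	"fmt"
	"unsafe"
)

// sliceHeader mirrors unsafeheader.Slice from the diff above.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

func main() {
	var raw [16]byte
	raw[0] = 0xAB

	// View the array's memory as a byte slice of the same length.
	var view []byte
	hdr := (*sliceHeader)(unsafe.Pointer(&view))
	hdr.Data = unsafe.Pointer(&raw)
	hdr.Len = len(raw)
	hdr.Cap = len(raw)

	fmt.Printf("%x\n", view[0]) // ab
}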
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
index ab433ccfb..579d2d735 100644
--- a/vendor/golang.org/x/sys/unix/README.md
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -89,7 +89,7 @@ constants.
Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
-new build system). However, depending on the OS, you make need to update the
+new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
### mksyscall.go
@@ -163,7 +163,7 @@ The merge is performed in the following steps:
## Generated files
-### `zerror_${GOOS}_${GOARCH}.go`
+### `zerrors_${GOOS}_${GOARCH}.go`
A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 2979bc9ac..780e387e3 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -187,6 +187,7 @@ struct ltchars {
#include <sys/select.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
+#include <sys/timerfd.h>
#include <sys/uio.h>
#include <sys/xattr.h>
#include <linux/bpf.h>
@@ -480,12 +481,13 @@ ccflags="$@"
$2 ~ /^(MS|MNT|UMOUNT)_/ ||
$2 ~ /^NS_GET_/ ||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
- $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ ||
$2 ~ /^KEXEC_/ ||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
$2 ~ /^MODULE_INIT_/ ||
$2 !~ "NLA_TYPE_MASK" &&
+ $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
index f911617be..dc0befee3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
@@ -6,7 +6,11 @@
package unix
-import "unsafe"
+import (
+ "unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
+)
//sys closedir(dir uintptr) (err error)
//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
@@ -71,6 +75,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
cnt++
continue
}
+
reclen := int(entry.Reclen)
if reclen > len(buf) {
// Not enough room. Return for now.
@@ -79,13 +84,15 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// restarting is O(n^2) in the length of the directory. Oh well.
break
}
+
// Copy entry into return buffer.
- s := struct {
- ptr unsafe.Pointer
- siz int
- cap int
- }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen}
- copy(buf, *(*[]byte)(unsafe.Pointer(&s)))
+ var s []byte
+ hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(&entry)
+ hdr.Cap = reclen
+ hdr.Len = reclen
+ copy(buf, s)
+
buf = buf[reclen:]
n += reclen
cnt++
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 9a5a6ee54..0cf31acf0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -423,6 +423,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sysnb Getrlimit(which int, lim *Rlimit) (err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tp *Timeval) (err error)
//sysnb Getuid() (uid int)
//sysnb Issetugid() (tainted bool)
//sys Kqueue() (fd int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
index 707ba4f59..2724e3a51 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
-//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
-func Gettimeofday(tv *Timeval) (err error) {
- // The tv passed to gettimeofday must be non-nil
- // but is otherwise unused. The answers come back
- // in the two registers.
- sec, usec, err := gettimeofday(tv)
- tv.Sec = int32(sec)
- tv.Usec = int32(usec)
- return err
-}
-
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint32(fd)
k.Filter = int16(mode)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index fdbfb5911..ce2e0d249 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: int32(usec)}
}
-//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
-func Gettimeofday(tv *Timeval) (err error) {
- // The tv passed to gettimeofday must be non-nil
- // but is otherwise unused. The answers come back
- // in the two registers.
- sec, usec, err := gettimeofday(tv)
- tv.Sec = sec
- tv.Usec = usec
- return err
-}
-
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint64(fd)
k.Filter = int16(mode)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
index f8bc4cfb1..fc17a3f23 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
-//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
-func Gettimeofday(tv *Timeval) (err error) {
- // The tv passed to gettimeofday must be non-nil
- // but is otherwise unused. The answers come back
- // in the two registers.
- sec, usec, err := gettimeofday(tv)
- tv.Sec = int32(sec)
- tv.Usec = int32(usec)
- return err
-}
-
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint32(fd)
k.Filter = int16(mode)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index 5ede3ac31..1e91ddf32 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -22,17 +22,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: int32(usec)}
}
-//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
-func Gettimeofday(tv *Timeval) (err error) {
- // The tv passed to gettimeofday must be non-nil
- // but is otherwise unused. The answers come back
- // in the two registers.
- sec, usec, err := gettimeofday(tv)
- tv.Sec = sec
- tv.Usec = usec
- return err
-}
-
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint64(fd)
k.Filter = int16(mode)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index bbe1abbce..942a4bbf7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1633,6 +1633,15 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys DeleteModule(name string, flags int) (err error)
//sys Dup(oldfd int) (fd int, err error)
+
+func Dup2(oldfd, newfd int) error {
+ // Android O and newer blocks dup2; riscv and arm64 don't implement dup2.
+ if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" {
+ return Dup3(oldfd, newfd, 0)
+ }
+ return dup2(oldfd, newfd)
+}
+
//sys Dup3(oldfd int, newfd int, flags int) (err error)
//sysnb EpollCreate1(flag int) (fd int, err error)
//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
@@ -1757,6 +1766,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
//sys Syncfs(fd int) (err error)
//sysnb Sysinfo(info *Sysinfo_t) (err error)
//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
+//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error)
+//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error)
+//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error)
//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error)
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sysnb Umask(mask int) (oldmask int)
@@ -2178,7 +2190,6 @@ func Klogset(typ int, arg int) (err error) {
// TimerGetoverrun
// TimerGettime
// TimerSettime
-// Timerfd
// Tkill (obsolete)
// Tuxcall
// Umount2
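With this change Dup2 becomes a portable wrapper that falls back to dup3(2) on Android, arm64, and riscv64, where dup2 is blocked or absent. A brief sketch of using it to redirect stderr into a file; the log path is an illustrative placeholder.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/stderr.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Make fd 2 (stderr) point at the log file.
	if err := unix.Dup2(int(f.Fd()), int(os.Stderr.Fd())); err != nil {
		panic(err)
	}
	fmt.Fprintln(os.Stderr, "this line lands in /tmp/stderr.log")
}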
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index a8374b67c..048d18e3c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -49,7 +49,7 @@ func Pipe2(p []int, flags int) (err error) {
// 64-bit file system and 32-bit uid calls
// (386 default is 32-bit file system and 16-bit uid).
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index 8ed1d546f..72efe86ed 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -6,7 +6,7 @@
package unix
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index 99ae61373..e1913e2c9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -80,7 +80,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
// 64-bit file system and 32-bit uid calls
// (16-bit uid calls are not always supported in newer kernels)
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index 807a0b20c..c6de6b913 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -25,7 +25,7 @@ func EpollCreate(size int) (fd int, err error) {
//sysnb Getegid() (egid int)
//sysnb Geteuid() (euid int)
//sysnb Getgid() (gid int)
-//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb getrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Getuid() (uid int)
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
@@ -47,7 +47,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
@@ -168,6 +168,24 @@ func Pipe2(p []int, flags int) (err error) {
return
}
+// Getrlimit prefers the prlimit64 system call. See issue 38604.
+func Getrlimit(resource int, rlim *Rlimit) error {
+ err := prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+ return getrlimit(resource, rlim)
+}
+
+// Setrlimit prefers the prlimit64 system call. See issue 38604.
+func Setrlimit(resource int, rlim *Rlimit) error {
+ err := prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+ return setrlimit(resource, rlim)
+}
+
func (r *PtraceRegs) PC() uint64 { return r.Pc }
func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }
@@ -192,9 +210,9 @@ func InotifyInit() (fd int, err error) {
return InotifyInit1(0)
}
-func Dup2(oldfd int, newfd int) (err error) {
- return Dup3(oldfd, newfd, 0)
-}
+// dup2 exists because func Dup3 in syscall_linux.go references
+// it in an unreachable path. dup2 isn't available on arm64.
+func dup2(oldfd int, newfd int) error
func Pause() error {
_, err := ppoll(nil, 0, nil, nil)
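The arm64 Getrlimit/Setrlimit wrappers above now prefer prlimit64 and fall back to the raw syscalls only when prlimit64 is unavailable. Callers are unaffected; a typical use, shown as a sketch, raises the soft open-files limit to the hard limit.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	lim.Cur = lim.Max
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	fmt.Printf("RLIMIT_NOFILE now %d/%d\n", lim.Cur, lim.Max)
}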
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index af77e6e25..f0287476c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -7,7 +7,7 @@
package unix
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index e286c6ba3..c11328111 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -14,7 +14,7 @@ import (
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index ca0345aab..349374409 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -7,7 +7,7 @@
package unix
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index abdabbac3..b0b150556 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -191,10 +191,6 @@ func InotifyInit() (fd int, err error) {
return InotifyInit1(0)
}
-func Dup2(oldfd int, newfd int) (err error) {
- return Dup3(oldfd, newfd, 0)
-}
-
func Pause() error {
_, err := ppoll(nil, 0, nil, nil)
return err
@@ -228,3 +224,7 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+// dup2 exists because func Dup3 in syscall_linux.go references
+// it in an unreachable path. dup2 isn't available on arm64.
+func dup2(oldfd int, newfd int) error
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index 533e9305e..2363f7499 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -10,7 +10,7 @@ import (
"unsafe"
)
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index d890a227b..d389f1518 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -8,7 +8,7 @@ package unix
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
-//sys Dup2(oldfd int, newfd int) (err error)
+//sys dup2(oldfd int, newfd int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 8f710d014..400ba9fbc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -12,6 +12,8 @@ import (
"sync"
"syscall"
"unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
)
var (
@@ -113,15 +115,12 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d
return nil, errno
}
- // Slice memory layout
- var sl = struct {
- addr uintptr
- len int
- cap int
- }{addr, length, length}
-
- // Use unsafe to turn sl into a []byte.
- b := *(*[]byte)(unsafe.Pointer(&sl))
+ // Use unsafe to convert addr into a []byte.
+ var b []byte
+ hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
+ hdr.Data = unsafe.Pointer(addr)
+ hdr.Cap = length
+ hdr.Len = length
// Register mapping in m and return it.
p := &b[cap(b)-1]
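
The hand-rolled slice-header struct in mmapper.Mmap is replaced by the new internal unsafeheader.Slice type, so the []byte view over the mapped region no longer hard-codes assumptions about Go's slice layout. Callers keep using the exported Mmap/Munmap API; a hedged sketch (anonymous private mapping, Linux flag names):

    package main

    import "golang.org/x/sys/unix"

    func main() {
        // Map one anonymous page, write to it, and unmap it again.
        data, err := unix.Mmap(-1, 0, 4096,
            unix.PROT_READ|unix.PROT_WRITE,
            unix.MAP_PRIVATE|unix.MAP_ANON)
        if err != nil {
            panic(err)
        }
        copy(data, []byte("hello"))
        if err := unix.Munmap(data); err != nil {
            panic(err)
        }
    }
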
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 99a59d685..6e3cfec46 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -216,6 +216,7 @@ const (
BPF_F_RDONLY = 0x8
BPF_F_RDONLY_PROG = 0x80
BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REPLACE = 0x4
BPF_F_REUSE_STACKID = 0x400
BPF_F_SEQ_NUMBER = 0x8
BPF_F_SKIP_FIELD_MASK = 0xff
@@ -389,6 +390,7 @@ const (
CLONE_NEWNET = 0x40000000
CLONE_NEWNS = 0x20000
CLONE_NEWPID = 0x20000000
+ CLONE_NEWTIME = 0x80
CLONE_NEWUSER = 0x10000000
CLONE_NEWUTS = 0x4000000
CLONE_PARENT = 0x8000
@@ -737,6 +739,7 @@ const (
GENL_NAMSIZ = 0x10
GENL_START_ALLOC = 0x13
GENL_UNS_ADMIN_PERM = 0x10
+ GRND_INSECURE = 0x4
GRND_NONBLOCK = 0x1
GRND_RANDOM = 0x2
HDIO_DRIVE_CMD = 0x31f
@@ -1487,6 +1490,7 @@ const (
PR_GET_FPEMU = 0x9
PR_GET_FPEXC = 0xb
PR_GET_FP_MODE = 0x2e
+ PR_GET_IO_FLUSHER = 0x3a
PR_GET_KEEPCAPS = 0x7
PR_GET_NAME = 0x10
PR_GET_NO_NEW_PRIVS = 0x27
@@ -1522,6 +1526,7 @@ const (
PR_SET_FPEMU = 0xa
PR_SET_FPEXC = 0xc
PR_SET_FP_MODE = 0x2d
+ PR_SET_IO_FLUSHER = 0x39
PR_SET_KEEPCAPS = 0x8
PR_SET_MM = 0x23
PR_SET_MM_ARG_END = 0x9
@@ -1750,12 +1755,15 @@ const (
RTM_DELRULE = 0x21
RTM_DELTCLASS = 0x29
RTM_DELTFILTER = 0x2d
+ RTM_DELVLAN = 0x71
RTM_F_CLONED = 0x200
RTM_F_EQUALIZE = 0x400
RTM_F_FIB_MATCH = 0x2000
RTM_F_LOOKUP_TABLE = 0x1000
RTM_F_NOTIFY = 0x100
+ RTM_F_OFFLOAD = 0x4000
RTM_F_PREFIX = 0x800
+ RTM_F_TRAP = 0x8000
RTM_GETACTION = 0x32
RTM_GETADDR = 0x16
RTM_GETADDRLABEL = 0x4a
@@ -1777,7 +1785,8 @@ const (
RTM_GETSTATS = 0x5e
RTM_GETTCLASS = 0x2a
RTM_GETTFILTER = 0x2e
- RTM_MAX = 0x6f
+ RTM_GETVLAN = 0x72
+ RTM_MAX = 0x73
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
@@ -1792,6 +1801,7 @@ const (
RTM_NEWNETCONF = 0x50
RTM_NEWNEXTHOP = 0x68
RTM_NEWNSID = 0x58
+ RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@@ -1799,8 +1809,8 @@ const (
RTM_NEWSTATS = 0x5c
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
- RTM_NR_FAMILIES = 0x18
- RTM_NR_MSGTYPES = 0x60
+ RTM_NR_FAMILIES = 0x19
+ RTM_NR_MSGTYPES = 0x64
RTM_SETDCB = 0x4f
RTM_SETLINK = 0x13
RTM_SETNEIGHTBL = 0x43
@@ -2090,7 +2100,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0x9
+ TASKSTATS_VERSION = 0xa
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -2155,6 +2165,8 @@ const (
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23
+ TFD_TIMER_ABSTIME = 0x1
+ TFD_TIMER_CANCEL_ON_SET = 0x2
TIMER_ABSTIME = 0x1
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
@@ -2271,7 +2283,7 @@ const (
VMADDR_CID_ANY = 0xffffffff
VMADDR_CID_HOST = 0x2
VMADDR_CID_HYPERVISOR = 0x0
- VMADDR_CID_RESERVED = 0x1
+ VMADDR_CID_LOCAL = 0x1
VMADDR_PORT_ANY = 0xffffffff
VM_SOCKETS_INVALID_VERSION = 0xffffffff
VQUIT = 0x1
@@ -2398,6 +2410,7 @@ const (
XENFS_SUPER_MAGIC = 0xabba1974
XFS_SUPER_MAGIC = 0x58465342
Z3FOLD_MAGIC = 0x33
+ ZONEFS_MAGIC = 0x5a4f4653
ZSMALLOC_MAGIC = 0x58295829
)
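
The regenerated zerrors_linux.go tracks the Linux 5.6 uapi headers: BPF_F_REPLACE, CLONE_NEWTIME, GRND_INSECURE, PR_GET/SET_IO_FLUSHER, the nexthop/VLAN RTM_* message types, the generic TFD_TIMER_* flags, VMADDR_CID_LOCAL (replacing VMADDR_CID_RESERVED) and ZONEFS_MAGIC. A small sketch of one of the new flags, assuming a Linux 5.6+ kernel:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        buf := make([]byte, 32)
        // GRND_INSECURE returns best-effort bytes even before the entropy
        // pool is initialized; kernels older than 5.6 reject it with EINVAL.
        n, err := unix.Getrandom(buf, unix.GRND_INSECURE)
        if err != nil {
            n, err = unix.Getrandom(buf, 0) // blocking default on older kernels
        }
        fmt.Println(n, err)
    }
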
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 028c9d878..5e974110d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -342,6 +342,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 005970f71..47a57fe46 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -343,6 +343,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 0541f36ee..df2eea4bb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -349,6 +349,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 9ee8d1bc8..4e1214217 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -336,6 +336,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 4826bd705..a23b08029 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -339,6 +339,8 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x80
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 2346dc554..a5a921e43 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -339,6 +339,8 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x80
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index e758b61e3..d088e197b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -339,6 +339,8 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x80
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 2dfe6bba1..0ddf9d5fe 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -339,6 +339,8 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x80
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 518586670..a93ffc180 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -393,6 +393,8 @@ const (
TCSETSF = 0x802c7416
TCSETSW = 0x802c7415
TCXONC = 0x2000741e
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 4231b20b5..c1ea48b95 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -393,6 +393,8 @@ const (
TCSETSF = 0x802c7416
TCSETSW = 0x802c7415
TCXONC = 0x2000741e
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 6a0b2d293..7def950ba 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -330,6 +330,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 95e950fc8..d39293c87 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -403,6 +403,8 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 079762fa9..3ff3ec681 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -392,6 +392,8 @@ const (
TCSETSW = 0x8024540a
TCSETSW2 = 0x802c540e
TCXONC = 0x20005406
+ TFD_CLOEXEC = 0x400000
+ TFD_NONBLOCK = 0x4000
TIOCCBRK = 0x2000747a
TIOCCONS = 0x20007424
TIOCEXCL = 0x2000740d
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
index c1cc0a415..23e94d366 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
@@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
@@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) {
- r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int32(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
index a3fc49004..e2ffb3bed 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
@@ -1376,6 +1376,21 @@ func libc_getsid_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:linkname libc_gettimeofday libc_gettimeofday
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0)
uid = int(r0)
@@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) {
- r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int32(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_gettimeofday_trampoline()
-
-//go:linkname libc_gettimeofday libc_gettimeofday
-//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
index f8e5c37c5..102561730 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
@@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
@@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) {
- r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int64(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 50d6437e6..c67e336e2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1376,6 +1376,21 @@ func libc_getsid_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:linkname libc_gettimeofday libc_gettimeofday
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0)
uid = int(r0)
@@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) {
- r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int64(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_gettimeofday_trampoline()
-
-//go:linkname libc_gettimeofday libc_gettimeofday
-//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
index cea04e041..d34e6df2f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
@@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
@@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) {
- r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int32(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
index 63103950c..b759757a7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
@@ -1376,6 +1376,21 @@ func libc_getsid_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:linkname libc_gettimeofday libc_gettimeofday
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0)
uid = int(r0)
@@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) {
- r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int32(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_gettimeofday_trampoline()
-
-//go:linkname libc_gettimeofday libc_gettimeofday
-//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
index 8c3bb3a25..8d39a09f7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
@@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
@@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) {
- r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int64(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index a8709f72d..b28861260 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -1376,6 +1376,21 @@ func libc_getsid_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:linkname libc_gettimeofday libc_gettimeofday
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0)
uid = int(r0)
@@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) {
- r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- sec = int64(r0)
- usec = int32(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_gettimeofday_trampoline()
-
-//go:linkname libc_gettimeofday libc_gettimeofday
-//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
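
Across the darwin generated files above, Gettimeofday(tp *Timeval) is now exported directly and the old unexported three-result gettimeofday helper is dropped, matching the other ports. A usage sketch:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/sys/unix"
    )

    func main() {
        var tv unix.Timeval
        if err := unix.Gettimeofday(&tv); err != nil {
            panic(err)
        }
        // Field widths differ between darwin ports, so convert explicitly.
        fmt.Println(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000))
    }
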
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index fd2dae8e5..df217825f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -1450,6 +1450,37 @@ func Sysinfo(info *Sysinfo_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func TimerfdCreate(clockid int, flags int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIMERFD_CREATE, uintptr(clockid), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func TimerfdGettime(fd int, currValue *ItimerSpec) (err error) {
+ _, _, e1 := RawSyscall(SYS_TIMERFD_GETTIME, uintptr(fd), uintptr(unsafe.Pointer(currValue)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) {
+ _, _, e1 := RawSyscall6(SYS_TIMERFD_SETTIME, uintptr(fd), uintptr(flags), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
if e1 != 0 {
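
zsyscall_linux.go gains TimerfdCreate, TimerfdGettime and TimerfdSettime wrappers, paired with the new TFD_* constants and the ItimerSpec type added further down in ztypes_linux.go. A one-shot timer sketch (Linux only, error handling kept minimal):

    package main

    import "golang.org/x/sys/unix"

    func main() {
        fd, err := unix.TimerfdCreate(unix.CLOCK_MONOTONIC, unix.TFD_CLOEXEC)
        if err != nil {
            panic(err)
        }
        defer unix.Close(fd)

        // Arm a 500ms one-shot timer; a non-zero Interval would make it periodic.
        spec := unix.ItimerSpec{Value: unix.Timespec{Nsec: 500_000_000}}
        if err := unix.TimerfdSettime(fd, 0, &spec, nil); err != nil {
            panic(err)
        }

        // Reading blocks until expiry and yields the expiration count (uint64).
        var ticks [8]byte
        if _, err := unix.Read(fd, ticks[:]); err != nil {
            panic(err)
        }
    }
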
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index ba63af7b0..19ebd3ff7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -55,7 +55,7 @@ func pipe(p *[2]_C_int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index f64adef41..5c562182a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index ac19523e8..dc69d99c6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -234,7 +234,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index f0d2890b1..1b897dee0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -151,7 +151,7 @@ func Getgid() (gid int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getrlimit(resource int, rlim *Rlimit) (err error) {
+func getrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -307,7 +307,7 @@ func Setresuid(ruid int, euid int, suid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setrlimit(resource int, rlim *Rlimit) (err error) {
+func setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index aecbbca75..49186843a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 424fb7fb6..9171d3bd2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index 28c7239cf..82286f04f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index 84596b300..15920621c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index de022639d..73a42e2cc 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 888f21d37..6b8559536 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index 9bc353f0c..d7032ab1e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index 854e816d6..bcbbdd906 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -72,7 +72,7 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
index 37dcc74c2..102f1ab47 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
@@ -1,4 +1,4 @@
-// mksysctl_openbsd.pl
+// go run mksysctl_openbsd.go
// Code generated by the command above; DO NOT EDIT.
// +build 386,openbsd
@@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
{"hw.model", []_C_int{6, 2}},
{"hw.ncpu", []_C_int{6, 3}},
{"hw.ncpufound", []_C_int{6, 21}},
+ {"hw.ncpuonline", []_C_int{6, 25}},
{"hw.pagesize", []_C_int{6, 7}},
{"hw.physmem", []_C_int{6, 19}},
{"hw.product", []_C_int{6, 15}},
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
index fe6caa6eb..4866fced8 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
@@ -31,6 +31,7 @@ var sysctlMib = []mibentry{
{"hw.model", []_C_int{6, 2}},
{"hw.ncpu", []_C_int{6, 3}},
{"hw.ncpufound", []_C_int{6, 21}},
+ {"hw.ncpuonline", []_C_int{6, 25}},
{"hw.pagesize", []_C_int{6, 7}},
{"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
index 6eb8c0b08..d3801eb24 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
@@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
{"hw.model", []_C_int{6, 2}},
{"hw.ncpu", []_C_int{6, 3}},
{"hw.ncpufound", []_C_int{6, 21}},
+ {"hw.ncpuonline", []_C_int{6, 25}},
{"hw.pagesize", []_C_int{6, 7}},
{"hw.physmem", []_C_int{6, 19}},
{"hw.product", []_C_int{6, 15}},
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 7aae554f2..54559a895 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -431,4 +431,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 7968439a9..054a741b7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -353,4 +353,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 3c663c69d..307f2ba12 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -395,4 +395,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1f3b4d150..e9404dd54 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -298,4 +298,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 00da3de90..68bb6d29b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -416,4 +416,6 @@ const (
SYS_FSPICK = 4433
SYS_PIDFD_OPEN = 4434
SYS_CLONE3 = 4435
+ SYS_OPENAT2 = 4437
+ SYS_PIDFD_GETFD = 4438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index d404fbd4d..4e5251185 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -346,4 +346,6 @@ const (
SYS_FSPICK = 5433
SYS_PIDFD_OPEN = 5434
SYS_CLONE3 = 5435
+ SYS_OPENAT2 = 5437
+ SYS_PIDFD_GETFD = 5438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index bfbf242f3..4d9aa3003 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -346,4 +346,6 @@ const (
SYS_FSPICK = 5433
SYS_PIDFD_OPEN = 5434
SYS_CLONE3 = 5435
+ SYS_OPENAT2 = 5437
+ SYS_PIDFD_GETFD = 5438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 3826f497a..64af0707d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -416,4 +416,6 @@ const (
SYS_FSPICK = 4433
SYS_PIDFD_OPEN = 4434
SYS_CLONE3 = 4435
+ SYS_OPENAT2 = 4437
+ SYS_PIDFD_GETFD = 4438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 52e3da649..cc3c067ba 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -395,4 +395,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 6141f90a8..4050ff983 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -395,4 +395,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 4f7261a88..529abb6a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -297,4 +297,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index f47014ac0..276650010 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -360,4 +360,6 @@ const (
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index dd78abb0d..4dc82bb24 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -374,4 +374,6 @@ const (
SYS_FSMOUNT = 432
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
)
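
The syscall-number tables pick up SYS_OPENAT2 (437) and SYS_PIDFD_GETFD (438) from Linux 5.6. This update ships no high-level wrappers for them, so a caller would go through the raw Syscall interface; a sketch where pid and targetFD are placeholder inputs and CAP_SYS_PTRACE over the target process is required:

    package fdutil

    import "golang.org/x/sys/unix"

    // stealFD grabs a duplicate of targetFD from the process identified by pid.
    func stealFD(pid, targetFD int) (int, error) {
        pidfd, _, errno := unix.Syscall(unix.SYS_PIDFD_OPEN, uintptr(pid), 0, 0)
        if errno != 0 {
            return -1, errno
        }
        defer unix.Close(int(pidfd))

        newfd, _, errno := unix.Syscall(unix.SYS_PIDFD_GETFD, pidfd, uintptr(targetFD), 0)
        if errno != 0 {
            return -1, errno
        }
        return int(newfd), nil
    }
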
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index a8d0eac81..416f7767e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -18,6 +18,11 @@ type (
_C_long_long int64
)
+type ItimerSpec struct {
+ Interval Timespec
+ Value Timespec
+}
+
const (
TIME_OK = 0x0
TIME_INS = 0x1
@@ -114,7 +119,8 @@ type FscryptKeySpecifier struct {
type FscryptAddKeyArg struct {
Key_spec FscryptKeySpecifier
Raw_size uint32
- _ [9]uint32
+ Key_id uint32
+ _ [8]uint32
}
type FscryptRemoveKeyArg struct {
@@ -479,7 +485,7 @@ const (
IFLA_NEW_IFINDEX = 0x31
IFLA_MIN_MTU = 0x32
IFLA_MAX_MTU = 0x33
- IFLA_MAX = 0x35
+ IFLA_MAX = 0x36
IFLA_INFO_KIND = 0x1
IFLA_INFO_DATA = 0x2
IFLA_INFO_XSTATS = 0x3
@@ -2308,3 +2314,32 @@ type FsverityEnableArg struct {
Sig_ptr uint64
_ [11]uint64
}
+
+type Nhmsg struct {
+ Family uint8
+ Scope uint8
+ Protocol uint8
+ Resvd uint8
+ Flags uint32
+}
+
+type NexthopGrp struct {
+ Id uint32
+ Weight uint8
+ Resvd1 uint8
+ Resvd2 uint16
+}
+
+const (
+ NHA_UNSPEC = 0x0
+ NHA_ID = 0x1
+ NHA_GROUP = 0x2
+ NHA_GROUP_TYPE = 0x3
+ NHA_BLACKHOLE = 0x4
+ NHA_OIF = 0x5
+ NHA_GATEWAY = 0x6
+ NHA_ENCAP_TYPE = 0x7
+ NHA_ENCAP = 0x8
+ NHA_GROUPS = 0x9
+ NHA_MASTER = 0xa
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fc6b3fb5c..761b67c86 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -287,6 +287,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 26c30b84d..201fb3482 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -298,6 +298,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 814d42d54..8051b5610 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -276,6 +276,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index d9664c713..a936f2169 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -277,6 +277,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 0d721454f..aaca03dd7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -281,6 +281,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index ef697684d..2e7f3b8ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -280,6 +280,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 485fda70b..16add5a25 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -280,6 +280,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 569477eef..4ed2c8e54 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -281,6 +281,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 602d8b4ee..741519099 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -287,6 +287,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 6db9a7b73..046c2debd 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -287,6 +287,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 52b5348c2..0f2f61a6a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -305,6 +305,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index a111387b3..cca1b6be2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -300,6 +300,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 8153af181..33a73bf18 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -282,6 +282,7 @@ type Taskstats struct {
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
+ Ac_btime64 uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index d77711341..82076fb74 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -104,6 +104,35 @@ func (d *DLL) MustFindProc(name string) *Proc {
return p
}
+// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc
+// if found. It returns an error if search fails.
+func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) {
+ a, e := GetProcAddressByOrdinal(d.Handle, ordinal)
+ name := "#" + itoa(int(ordinal))
+ if e != nil {
+ return nil, &DLLError{
+ Err: e,
+ ObjName: name,
+ Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(),
+ }
+ }
+ p := &Proc{
+ Dll: d,
+ Name: name,
+ addr: a,
+ }
+ return p, nil
+}
+
+// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails.
+func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc {
+ p, e := d.FindProcByOrdinal(ordinal)
+ if e != nil {
+ panic(e)
+ }
+ return p
+}
+
// Release unloads DLL d from memory.
func (d *DLL) Release() (err error) {
return FreeLibrary(d.Handle)
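
dll_windows.go gains FindProcByOrdinal and MustFindProcByOrdinal so a Proc can be resolved from a DLL that only exports by ordinal. A sketch; the DLL name and ordinal below are placeholders, not real values:

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        d, err := windows.LoadDLL("vendor.dll") // placeholder name
        if err != nil {
            panic(err)
        }
        defer d.Release()

        proc, err := d.FindProcByOrdinal(12) // placeholder ordinal
        if err != nil {
            panic(err)
        }
        r1, _, callErr := proc.Call()
        fmt.Println(r1, callErr)
    }
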
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
index f482a9fab..92ac05ff4 100644
--- a/vendor/golang.org/x/sys/windows/env_windows.go
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -8,7 +8,6 @@ package windows
import (
"syscall"
- "unicode/utf16"
"unsafe"
)
@@ -40,17 +39,11 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) {
defer DestroyEnvironmentBlock(block)
blockp := uintptr(unsafe.Pointer(block))
for {
- entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:]
- for i, v := range entry {
- if v == 0 {
- entry = entry[:i]
- break
- }
- }
+ entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp)))
if len(entry) == 0 {
break
}
- env = append(env, string(utf16.Decode(entry)))
+ env = append(env, entry)
blockp += 2 * (uintptr(len(entry)) + 1)
}
return env, nil
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 4b6eff186..9e3c44a85 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -7,6 +7,8 @@ package windows
import (
"syscall"
"unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
)
const (
@@ -1229,7 +1231,7 @@ func (sd *SECURITY_DESCRIPTOR) String() string {
return ""
}
defer LocalFree(Handle(unsafe.Pointer(sddl)))
- return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:])
+ return UTF16PtrToString(sddl)
}
// ToAbsolute converts a self-relative security descriptor into an absolute one.
@@ -1307,9 +1309,17 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT
}
func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR {
- sdBytes := make([]byte, selfRelativeSD.Length())
- copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)])
- return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0]))
+ sdLen := (int)(selfRelativeSD.Length())
+
+ var src []byte
+ h := (*unsafeheader.Slice)(unsafe.Pointer(&src))
+ h.Data = unsafe.Pointer(selfRelativeSD)
+ h.Len = sdLen
+ h.Cap = sdLen
+
+ dst := make([]byte, sdLen)
+ copy(dst, src)
+ return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0]))
}
// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a
@@ -1391,6 +1401,6 @@ func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL
}
defer LocalFree(Handle(unsafe.Pointer(winHeapACL)))
aclBytes := make([]byte, winHeapACL.aclSize)
- copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)])
+ copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)])
return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil
}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 053d664d0..12c0544cb 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -13,6 +13,8 @@ import (
"time"
"unicode/utf16"
"unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
)
type Handle uintptr
@@ -117,6 +119,32 @@ func UTF16PtrFromString(s string) (*uint16, error) {
return &a[0], nil
}
+// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string.
+// If the pointer is nil, this returns the empty string. This assumes that the UTF-16 sequence is terminated
+// at a zero word; if the zero word is not present, the program may crash.
+func UTF16PtrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+ if *p == 0 {
+ return ""
+ }
+
+ // Find NUL terminator.
+ n := 0
+ for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
+ ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
+ }
+
+ var s []uint16
+ h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
+ h.Data = unsafe.Pointer(p)
+ h.Len = n
+ h.Cap = n
+
+ return string(utf16.Decode(s))
+}
+
func Getpagesize() int { return 4096 }
// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
@@ -1181,7 +1209,12 @@ type IPv6Mreq struct {
Interface uint32
}
-func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS }
+func GetsockoptInt(fd Handle, level, opt int) (int, error) {
+ v := int32(0)
+ l := int32(unsafe.Sizeof(v))
+ err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l)
+ return int(v), err
+}
func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) {
sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)}
@@ -1378,7 +1411,7 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e
return "", err
}
defer CoTaskMemFree(unsafe.Pointer(p))
- return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil
+ return UTF16PtrToString(p), nil
}
// RtlGetVersion returns the version of the underlying operating system, ignoring
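
syscall_windows.go adds UTF16PtrToString, which walks a NUL-terminated UTF-16 sequence and decodes it to a Go string (the env, security-descriptor and known-folder helpers above now use it instead of the old fixed-size array casts), and GetsockoptInt becomes a real getsockopt call instead of always returning EWINDOWS. A small round-trip sketch of the string helper:

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        // p is the kind of NUL-terminated *uint16 most Win32 APIs hand back.
        p, err := windows.UTF16PtrFromString(`C:\Program Files`)
        if err != nil {
            panic(err)
        }
        fmt.Println(windows.UTF16PtrToString(p)) // prints C:\Program Files
    }
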
diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS
new file mode 100644
index 000000000..2b00ddba0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS
new file mode 100644
index 000000000..1fbd3e976
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE
new file mode 100644
index 000000000..49ea0f928
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
new file mode 100644
index 000000000..77dbe1b5c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -0,0 +1,789 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/set"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given proto.Message.
+func Unmarshal(b []byte, m proto.Message) error {
+ return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable textproto format unmarshaler.
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // DiscardUnknown specifies whether to ignore unknown fields when parsing.
+ // An unknown field is any field whose field name or field number does not
+ // resolve to any known or extension field in the message.
+ // By default, unmarshal rejects unknown fields as an error.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling
+ // google.protobuf.Any messages or extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.MessageTypeResolver
+ protoregistry.ExtensionTypeResolver
+ }
+}
+
+// Unmarshal reads the given []byte and populates the given proto.Message using options in
+// the UnmarshalOptions object.
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ proto.Reset(m)
+
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ dec := decoder{text.NewDecoder(b), o}
+ if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+ return err
+ }
+ if o.AllowPartial {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+}
+
+type decoder struct {
+ *text.Decoder
+ opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("(line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok text.Token) error {
+ return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for given position.
+func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if messageDesc.FullName() == "google.protobuf.Any" {
+ return d.unmarshalAny(m, checkDelims)
+ }
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ var seenNums set.Ints
+ var seenOneofs set.Ints
+ fieldDescs := messageDesc.Fields()
+
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch typ := tok.Kind(); typ {
+ case text.Name:
+ // Continue below.
+ case text.EOF:
+ if checkDelims {
+ return text.ErrUnexpectedEOF
+ }
+ return nil
+ default:
+ if checkDelims && typ == text.MessageClose {
+ return nil
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ // Resolve the field descriptor.
+ var name pref.Name
+ var fd pref.FieldDescriptor
+ var xt pref.ExtensionType
+ var xtErr error
+ var isFieldNumberName bool
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ name = pref.Name(tok.IdentName())
+ fd = fieldDescs.ByName(name)
+ if fd == nil {
+ // The proto name of a group field is in all lowercase,
+ // while the textproto field name is the group message name.
+ gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name {
+ fd = nil // reset since field name is actually the message name
+ }
+
+ case text.TypeName:
+ // Handle extensions only. This code path is not for Any.
+ xt, xtErr = d.findExtension(pref.FullName(tok.TypeName()))
+
+ case text.FieldNumber:
+ isFieldNumberName = true
+ num := pref.FieldNumber(tok.FieldNumber())
+ if !num.IsValid() {
+ return d.newError(tok.Pos(), "invalid field number: %d", num)
+ }
+ fd = fieldDescs.ByNumber(num)
+ if fd == nil {
+ xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num)
+ }
+ }
+
+ if xt != nil {
+ fd = xt.TypeDescriptor()
+ if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+ return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+ }
+ } else if xtErr != nil && xtErr != protoregistry.NotFound {
+ return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
+ }
+ if flags.ProtoLegacy {
+ if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+ fd = nil // reset since the weak reference is not linked in
+ }
+ }
+
+ // Handle unknown fields.
+ if fd == nil {
+ if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) {
+ d.skipValue()
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field: %v", tok.RawString())
+ }
+
+ // Handle fields identified by field number.
+ if isFieldNumberName {
+ // TODO: Add an option to permit parsing field numbers.
+ //
+ // This requires careful thought as the MarshalOptions.EmitUnknown
+ // option allows formatting unknown fields as the field number and the
+ // best-effort textual representation of the field value. In that case,
+ // it may not be possible to unmarshal the value from a parser that does
+ // have information about the unknown field.
+ return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString())
+ }
+
+ switch {
+ case fd.IsList():
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ list := m.Mutable(fd).List()
+ if err := d.unmarshalList(fd, list); err != nil {
+ return err
+ }
+
+ case fd.IsMap():
+ mmap := m.Mutable(fd).Map()
+ if err := d.unmarshalMap(fd, mmap); err != nil {
+ return err
+ }
+
+ default:
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ // If field is a oneof, check if it has already been set.
+ if od := fd.ContainingOneof(); od != nil {
+ idx := uint64(od.Index())
+ if seenOneofs.Has(idx) {
+ return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName())
+ }
+ seenOneofs.Set(idx)
+ }
+
+ num := uint64(fd.Number())
+ if seenNums.Has(num) {
+ return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString())
+ }
+
+ if err := d.unmarshalSingular(fd, m); err != nil {
+ return err
+ }
+ seenNums.Set(num)
+ }
+ }
+
+ return nil
+}
+
+// findExtension returns protoreflect.ExtensionType from the Resolver if found.
+func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) {
+ xt, err := d.opts.Resolver.FindExtensionByName(xtName)
+ if err == nil {
+ return xt, nil
+ }
+ return messageset.FindMessageSetExtension(d.opts.Resolver, xtName)
+}
+
+// unmarshalSingular unmarshals a non-repeated field value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
+ var val pref.Value
+ var err error
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ val = m.NewField(fd)
+ err = d.unmarshalMessage(val.Message(), true)
+ default:
+ val, err = d.unmarshalScalar(fd)
+ }
+ if err == nil {
+ m.Set(fd, val)
+ }
+ return err
+}
+
+// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+ tok, err := d.Read()
+ if err != nil {
+ return pref.Value{}, err
+ }
+
+ if tok.Kind() != text.Scalar {
+ return pref.Value{}, d.unexpectedTokenError(tok)
+ }
+
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ if b, ok := tok.Bool(); ok {
+ return pref.ValueOfBool(b), nil
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if n, ok := tok.Int32(); ok {
+ return pref.ValueOfInt32(n), nil
+ }
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if n, ok := tok.Int64(); ok {
+ return pref.ValueOfInt64(n), nil
+ }
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if n, ok := tok.Uint32(); ok {
+ return pref.ValueOfUint32(n), nil
+ }
+
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if n, ok := tok.Uint64(); ok {
+ return pref.ValueOfUint64(n), nil
+ }
+
+ case pref.FloatKind:
+ if n, ok := tok.Float32(); ok {
+ return pref.ValueOfFloat32(n), nil
+ }
+
+ case pref.DoubleKind:
+ if n, ok := tok.Float64(); ok {
+ return pref.ValueOfFloat64(n), nil
+ }
+
+ case pref.StringKind:
+ if s, ok := tok.String(); ok {
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
+ }
+ return pref.ValueOfString(s), nil
+ }
+
+ case pref.BytesKind:
+ if b, ok := tok.String(); ok {
+ return pref.ValueOfBytes([]byte(b)), nil
+ }
+
+ case pref.EnumKind:
+ if lit, ok := tok.Enum(); ok {
+ // Lookup EnumNumber based on name.
+ if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
+ return pref.ValueOfEnum(enumVal.Number()), nil
+ }
+ }
+ if num, ok := tok.Int32(); ok {
+ return pref.ValueOfEnum(pref.EnumNumber(num)), nil
+ }
+
+ default:
+ panic(fmt.Sprintf("invalid scalar kind %v", kind))
+ }
+
+ return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+}
+
+// unmarshalList unmarshals into given protoreflect.List. A list value can
+// either be in [] syntax or simply a single scalar/message value.
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+
+ default:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+ }
+
+ return d.unexpectedTokenError(tok)
+}
+
+// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
+// textproto message containing {key: <kvalue>, value: <mvalue>}.
+func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
+ // Determine ahead whether map entry is a scalar type or a message type in
+ // order to call the appropriate unmarshalMapValue func inside
+ // unmarshalMapEntry.
+ var unmarshalMapValue func() (pref.Value, error)
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ unmarshalMapValue = func() (pref.Value, error) {
+ pval := mmap.NewValue()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return pref.Value{}, err
+ }
+ return pval, nil
+ }
+ default:
+ unmarshalMapValue = func() (pref.Value, error) {
+ return d.unmarshalScalar(fd.MapValue())
+ }
+ }
+
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue)
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil {
+ return err
+ }
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+}
+
+// unmarshalMapEntry unmarshals a single map entry into the given protoreflect.Map.
+// A map entry is a textproto message containing {key: <kvalue>, value: <mvalue>}.
+func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
+ var key pref.MapKey
+ var pval pref.Value
+Loop:
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.Name:
+ if tok.NameKind() != text.IdentName {
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString())
+ }
+ d.skipValue()
+ continue Loop
+ }
+ // Continue below.
+ case text.MessageClose:
+ break Loop
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+
+ name := tok.IdentName()
+ switch name {
+ case "key":
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ if key.IsValid() {
+ return d.newError(tok.Pos(), `map entry "key" cannot be repeated`)
+ }
+ val, err := d.unmarshalScalar(fd.MapKey())
+ if err != nil {
+ return err
+ }
+ key = val.MapKey()
+
+ case "value":
+ if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ }
+ if pval.IsValid() {
+ return d.newError(tok.Pos(), `map entry "value" cannot be repeated`)
+ }
+ pval, err = unmarshalMapValue()
+ if err != nil {
+ return err
+ }
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", name)
+ }
+ d.skipValue()
+ }
+ }
+
+ if !key.IsValid() {
+ key = fd.MapKey().Default().MapKey()
+ }
+ if !pval.IsValid() {
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ // If value field is not set for message/group types, construct an
+ // empty one as default.
+ pval = mmap.NewValue()
+ default:
+ pval = fd.MapValue().Default()
+ }
+ }
+ mmap.Set(key, pval)
+ return nil
+}
+
+// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
+// or non-expanded form.
+func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
+ var typeURL string
+ var bValue []byte
+
+ // hasFields tracks which valid fields have been seen in the loop below in
+ // order to flag an error if there are duplicates or conflicts. It may
+ // contain the strings "type_url", "value" and "expanded". The literal
+ // "expanded" is used to indicate that the expanded form has been
+ // encountered already.
+ hasFields := map[string]bool{}
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+Loop:
+ for {
+ // Read field name. Can only have 3 possible field names, i.e. type_url,
+ // value and type URL name inside [].
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if typ := tok.Kind(); typ != text.Name {
+ if checkDelims {
+ if typ == text.MessageClose {
+ break Loop
+ }
+ } else if typ == text.EOF {
+ break Loop
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ // Both type_url and value fields require field separator :.
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ switch tok.IdentName() {
+ case "type_url":
+ if hasFields["type_url"] {
+ return d.newError(tok.Pos(), "duplicate Any type_url field")
+ }
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ var ok bool
+ typeURL, ok = tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString())
+ }
+ hasFields["type_url"] = true
+
+ case "value":
+ if hasFields["value"] {
+ return d.newError(tok.Pos(), "duplicate Any value field")
+ }
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ s, ok := tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString())
+ }
+ bValue = []byte(s)
+ hasFields["value"] = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ }
+ }
+
+ case text.TypeName:
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "cannot have more than one type")
+ }
+ if hasFields["type_url"] {
+ return d.newError(tok.Pos(), "conflict with type_url field")
+ }
+ typeURL = tok.TypeName()
+ var err error
+ bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos())
+ if err != nil {
+ return err
+ }
+ hasFields["expanded"] = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ }
+ }
+ }
+
+ fds := m.Descriptor().Fields()
+ if len(typeURL) > 0 {
+ m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL))
+ }
+ if len(bValue) > 0 {
+ m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue))
+ }
+ return nil
+}
+
+func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) {
+ mt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err)
+ }
+ // Create new message for the embedded message type and unmarshal the value
+ // field into it.
+ m := mt.New()
+ if err := d.unmarshalMessage(m, true); err != nil {
+ return nil, err
+ }
+ // Serialize the embedded message and return the resulting bytes.
+ b, err := proto.MarshalOptions{
+ AllowPartial: true, // Never check required fields inside an Any.
+ Deterministic: true,
+ }.Marshal(m.Interface())
+ if err != nil {
+ return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err)
+ }
+ return b, nil
+}
+
+// skipValue makes the decoder parse a field value in order to advance the read
+// to the next field. It relies on Read returning an error if the types are not
+// in valid sequence.
+func (d decoder) skipValue() error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ // Only need to continue reading for messages and lists.
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.skipMessageValue()
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ return d.skipMessageValue()
+ default:
+ // Skip items. This will not validate whether skipped values are
+ // of the same type or not, same behavior as C++
+ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0.
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// skipMessageValue makes the decoder parse and skip over all fields in a
+// message. It assumes that the previous read type is MessageOpen.
+func (d decoder) skipMessageValue() error {
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageClose:
+ return nil
+ case text.Name:
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+}
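
The exported surface of decode.go is small: Unmarshal with default options, or UnmarshalOptions.Unmarshal when AllowPartial, DiscardUnknown, or a custom Resolver is needed; everything below it is the recursive decoder. A minimal usage sketch, assuming the module's well-known durationpb package is importable (it is not part of this diff):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Textproto input for a google.protobuf.Duration message.
        in := []byte(`seconds: 90 nanos: 500000000`)

        d := &durationpb.Duration{}
        // DiscardUnknown skips unknown fields instead of reporting them as errors.
        opts := prototext.UnmarshalOptions{DiscardUnknown: true}
        if err := opts.Unmarshal(in, d); err != nil {
            panic(err)
        }
        fmt.Println(d.GetSeconds(), d.GetNanos()) // 90 500000000
    }
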
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
new file mode 100644
index 000000000..162b4f98a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package prototext marshals and unmarshals protocol buffer messages as the
+// textproto format.
+package prototext
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
new file mode 100644
index 000000000..dece22973
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -0,0 +1,426 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = " "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+ return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in textproto format using default
+// options. Do not depend on the output being stable. It may change over time
+// across different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+ return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable text format marshaler.
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Multiline specifies whether the marshaler should format the output in
+ // indented form with every textual element on a new line.
+ // If Indent is an empty string, then an arbitrary indent is chosen.
+ Multiline bool
+
+ // Indent specifies the set of indentation characters to use in a multiline
+ // formatted output such that every entry is preceded by Indent and
+ // terminated by a newline. If non-empty, then Multiline is treated as true.
+ // Indent can only be composed of space or tab characters.
+ Indent string
+
+ // EmitASCII specifies whether to format strings and bytes as ASCII only
+ // as opposed to using UTF-8 encoding when possible.
+ EmitASCII bool
+
+ // allowInvalidUTF8 specifies whether to permit the encoding of strings
+ // with invalid UTF-8. This is unexported as it is intended to only
+ // be specified by the Format method.
+ allowInvalidUTF8 bool
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // EmitUnknown specifies whether to emit unknown fields in the output.
+ // If specified, the unmarshaler may be unable to parse the output.
+ // The default is to exclude unknown fields.
+ EmitUnknown bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return "<nil>" // invalid syntax, but okay since this is for debugging
+ }
+ o.allowInvalidUTF8 = true
+ o.AllowPartial = true
+ o.EmitUnknown = true
+ b, _ := o.Marshal(m)
+ return string(b)
+}
+
+// Marshal writes the given proto.Message in textproto format using options in
+// the MarshalOptions object. Do not depend on the output being stable. It may
+// change over time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ var delims = [2]byte{'{', '}'}
+
+ if o.Multiline && o.Indent == "" {
+ o.Indent = defaultIndent
+ }
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
+ if err != nil {
+ return nil, err
+ }
+
+ // Treat nil message interface as an empty message,
+ // in which case there is nothing to output.
+ if m == nil {
+ return []byte{}, nil
+ }
+
+ enc := encoder{internalEnc, o}
+ err = enc.marshalMessage(m.ProtoReflect(), false)
+ if err != nil {
+ return nil, err
+ }
+ out := enc.Bytes()
+ if len(o.Indent) > 0 && len(out) > 0 {
+ out = append(out, '\n')
+ }
+ if o.AllowPartial {
+ return out, nil
+ }
+ return out, proto.CheckInitialized(m)
+}
+
+type encoder struct {
+ *text.Encoder
+ opts MarshalOptions
+}
+
+// marshalMessage marshals the given protoreflect.Message.
+func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if inclDelims {
+ e.StartMessage()
+ defer e.EndMessage()
+ }
+
+ // Handle Any expansion.
+ if messageDesc.FullName() == "google.protobuf.Any" {
+ if e.marshalAny(m) {
+ return nil
+ }
+ // If unable to expand, continue on to marshal Any as a regular message.
+ }
+
+ // Marshal known fields.
+ fieldDescs := messageDesc.Fields()
+ size := fieldDescs.Len()
+ for i := 0; i < size; {
+ fd := fieldDescs.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ name := fd.Name()
+ // Use type name for group field name.
+ if fd.Kind() == pref.GroupKind {
+ name = fd.Message().Name()
+ }
+ val := m.Get(fd)
+ if err := e.marshalField(string(name), val, fd); err != nil {
+ return err
+ }
+ }
+
+ // Marshal extensions.
+ if err := e.marshalExtensions(m); err != nil {
+ return err
+ }
+
+ // Marshal unknown fields.
+ if e.opts.EmitUnknown {
+ e.marshalUnknown(m.GetUnknown())
+ }
+
+ return nil
+}
+
+// marshalField marshals the given field with protoreflect.Value.
+func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
+ switch {
+ case fd.IsList():
+ return e.marshalList(name, val.List(), fd)
+ case fd.IsMap():
+ return e.marshalMap(name, val.Map(), fd)
+ default:
+ e.WriteName(name)
+ return e.marshalSingular(val, fd)
+ }
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ e.WriteBool(val.Bool())
+
+ case pref.StringKind:
+ s := val.String()
+ if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return errors.InvalidUTF8(string(fd.FullName()))
+ }
+ e.WriteString(s)
+
+ case pref.Int32Kind, pref.Int64Kind,
+ pref.Sint32Kind, pref.Sint64Kind,
+ pref.Sfixed32Kind, pref.Sfixed64Kind:
+ e.WriteInt(val.Int())
+
+ case pref.Uint32Kind, pref.Uint64Kind,
+ pref.Fixed32Kind, pref.Fixed64Kind:
+ e.WriteUint(val.Uint())
+
+ case pref.FloatKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinites.
+ e.WriteFloat(val.Float(), 32)
+
+ case pref.DoubleKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinites.
+ e.WriteFloat(val.Float(), 64)
+
+ case pref.BytesKind:
+ e.WriteString(string(val.Bytes()))
+
+ case pref.EnumKind:
+ num := val.Enum()
+ if desc := fd.Enum().Values().ByNumber(num); desc != nil {
+ e.WriteLiteral(string(desc.Name()))
+ } else {
+ // Use numeric value if there is no enum description.
+ e.WriteInt(int64(num))
+ }
+
+ case pref.MessageKind, pref.GroupKind:
+ return e.marshalMessage(val.Message(), true)
+
+ default:
+ panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+ }
+ return nil
+}
+
+// marshalList marshals the given protoreflect.List as multiple name-value fields.
+func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
+ size := list.Len()
+ for i := 0; i < size; i++ {
+ e.WriteName(name)
+ if err := e.marshalSingular(list.Get(i), fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
+ var err error
+ mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool {
+ e.WriteName(name)
+ e.StartMessage()
+ defer e.EndMessage()
+
+ e.WriteName("key")
+ err = e.marshalSingular(key.Value(), fd.MapKey())
+ if err != nil {
+ return false
+ }
+
+ e.WriteName("value")
+ err = e.marshalSingular(val, fd.MapValue())
+ if err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
+
+// marshalExtensions marshals extension fields.
+func (e encoder) marshalExtensions(m pref.Message) error {
+ type entry struct {
+ key string
+ value pref.Value
+ desc pref.FieldDescriptor
+ }
+
+ // Get a sorted list based on field key first.
+ var entries []entry
+ m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+ if !fd.IsExtension() {
+ return true
+ }
+ // For MessageSet extensions, the name used is the parent message.
+ name := fd.FullName()
+ if messageset.IsMessageSetExtension(fd) {
+ name = name.Parent()
+ }
+ entries = append(entries, entry{
+ key: string(name),
+ value: v,
+ desc: fd,
+ })
+ return true
+ })
+ // Sort extensions lexicographically.
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].key < entries[j].key
+ })
+
+ // Write out sorted list.
+ for _, entry := range entries {
+ // Extension field name is the proto field name enclosed in [].
+ name := "[" + entry.key + "]"
+ if err := e.marshalField(name, entry.value, entry.desc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalUnknown parses the given []byte and marshals fields out.
+// This function assumes proper encoding in the given []byte.
+func (e encoder) marshalUnknown(b []byte) {
+ const dec = 10
+ const hex = 16
+ for len(b) > 0 {
+ num, wtype, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ e.WriteName(strconv.FormatInt(int64(num), dec))
+
+ switch wtype {
+ case protowire.VarintType:
+ var v uint64
+ v, n = protowire.ConsumeVarint(b)
+ e.WriteUint(v)
+ case protowire.Fixed32Type:
+ var v uint32
+ v, n = protowire.ConsumeFixed32(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex))
+ case protowire.Fixed64Type:
+ var v uint64
+ v, n = protowire.ConsumeFixed64(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(v, hex))
+ case protowire.BytesType:
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ e.WriteString(string(v))
+ case protowire.StartGroupType:
+ e.StartMessage()
+ var v []byte
+ v, n = protowire.ConsumeGroup(num, b)
+ e.marshalUnknown(v)
+ e.EndMessage()
+ default:
+ panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype))
+ }
+
+ b = b[n:]
+ }
+}
+
+// marshalAny marshals the given google.protobuf.Any message in expanded form.
+// It returns true if it was able to marshal, else false.
+func (e encoder) marshalAny(any pref.Message) bool {
+ // Construct the embedded message.
+ fds := any.Descriptor().Fields()
+ fdType := fds.ByNumber(fieldnum.Any_TypeUrl)
+ typeURL := any.Get(fdType).String()
+ mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return false
+ }
+ m := mt.New().Interface()
+
+ // Unmarshal bytes into embedded message.
+ fdValue := fds.ByNumber(fieldnum.Any_Value)
+ value := any.Get(fdValue)
+ err = proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: e.opts.Resolver,
+ }.Unmarshal(value.Bytes(), m)
+ if err != nil {
+ return false
+ }
+
+ // Get current encoder position. If marshaling fails, reset encoder output
+ // back to this position.
+ pos := e.Snapshot()
+
+ // Field name is the proto field name enclosed in [].
+ e.WriteName("[" + typeURL + "]")
+ err = e.marshalMessage(m.ProtoReflect(), true)
+ if err != nil {
+ e.Reset(pos)
+ return false
+ }
+ return true
+}
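
encode.go is the mirror of the decoder: Marshal and Format walk the message through protoreflect and emit name/value pairs via the internal text encoder, with MarshalOptions controlling indentation, unknown-field output, and Any expansion. A short sketch of the options-driven path, again assuming the well-known durationpb package; the comments above warn that the exact output is unstable, so it is shown only approximately:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        d := &durationpb.Duration{Seconds: 90, Nanos: 500000000}

        // Multiline plus an explicit two-space Indent exercises the MarshalOptions
        // fields declared above; do not compare the bytes against a golden string.
        out, err := prototext.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(d)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s", out)
        // Prints roughly:
        //   seconds: 90
        //   nanos: 500000000
    }
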
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
new file mode 100644
index 000000000..a427f8b70
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -0,0 +1,538 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protowire parses and formats the raw wire encoding.
+// See https://developers.google.com/protocol-buffers/docs/encoding.
+//
+// For marshaling and unmarshaling entire protobuf messages,
+// use the "google.golang.org/protobuf/proto" package instead.
+package protowire
+
+import (
+ "io"
+ "math"
+ "math/bits"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Number represents the field number.
+type Number int32
+
+const (
+ MinValidNumber Number = 1
+ FirstReservedNumber Number = 19000
+ LastReservedNumber Number = 19999
+ MaxValidNumber Number = 1<<29 - 1
+)
+
+// IsValid reports whether the field number is semantically valid.
+//
+// Note that while numbers within the reserved range are semantically invalid,
+// they are syntactically valid in the wire format.
+// Implementations may treat records with reserved field numbers as unknown.
+func (n Number) IsValid() bool {
+ return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
+}
+
+// Type represents the wire type.
+type Type int8
+
+const (
+ VarintType Type = 0
+ Fixed32Type Type = 5
+ Fixed64Type Type = 1
+ BytesType Type = 2
+ StartGroupType Type = 3
+ EndGroupType Type = 4
+)
+
+const (
+ _ = -iota
+ errCodeTruncated
+ errCodeFieldNumber
+ errCodeOverflow
+ errCodeReserved
+ errCodeEndGroup
+)
+
+var (
+ errFieldNumber = errors.New("invalid field number")
+ errOverflow = errors.New("variable length integer overflow")
+ errReserved = errors.New("cannot parse reserved wire type")
+ errEndGroup = errors.New("mismatching end group marker")
+ errParse = errors.New("parse error")
+)
+
+// ParseError converts an error code into an error value.
+// This returns nil if n is a non-negative number.
+func ParseError(n int) error {
+ if n >= 0 {
+ return nil
+ }
+ switch n {
+ case errCodeTruncated:
+ return io.ErrUnexpectedEOF
+ case errCodeFieldNumber:
+ return errFieldNumber
+ case errCodeOverflow:
+ return errOverflow
+ case errCodeReserved:
+ return errReserved
+ case errCodeEndGroup:
+ return errEndGroup
+ default:
+ return errParse
+ }
+}
+
+// ConsumeField parses an entire field record (both tag and value) and returns
+// the field number, the wire type, and the total length.
+// This returns a negative length upon an error (see ParseError).
+//
+// The total length includes the tag header and the end group marker (if the
+// field is a group).
+func ConsumeField(b []byte) (Number, Type, int) {
+ num, typ, n := ConsumeTag(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ m := ConsumeFieldValue(num, typ, b[n:])
+ if m < 0 {
+ return 0, 0, m // forward error code
+ }
+ return num, typ, n + m
+}
+
+// ConsumeFieldValue parses a field value and returns its length.
+// This assumes that the field Number and wire Type have already been parsed.
+// This returns a negative length upon an error (see ParseError).
+//
+// When parsing a group, the length includes the end group marker and
+// the end group is verified to match the starting field number.
+func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
+ switch typ {
+ case VarintType:
+ _, n = ConsumeVarint(b)
+ return n
+ case Fixed32Type:
+ _, n = ConsumeFixed32(b)
+ return n
+ case Fixed64Type:
+ _, n = ConsumeFixed64(b)
+ return n
+ case BytesType:
+ _, n = ConsumeBytes(b)
+ return n
+ case StartGroupType:
+ n0 := len(b)
+ for {
+ num2, typ2, n := ConsumeTag(b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ if typ2 == EndGroupType {
+ if num != num2 {
+ return errCodeEndGroup
+ }
+ return n0 - len(b)
+ }
+
+ n = ConsumeFieldValue(num2, typ2, b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ }
+ case EndGroupType:
+ return errCodeEndGroup
+ default:
+ return errCodeReserved
+ }
+}
+
+// AppendTag encodes num and typ as a varint-encoded tag and appends it to b.
+func AppendTag(b []byte, num Number, typ Type) []byte {
+ return AppendVarint(b, EncodeTag(num, typ))
+}
+
+// ConsumeTag parses b as a varint-encoded tag, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeTag(b []byte) (Number, Type, int) {
+ v, n := ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ num, typ := DecodeTag(v)
+ if num < MinValidNumber {
+ return 0, 0, errCodeFieldNumber
+ }
+ return num, typ, n
+}
+
+func SizeTag(num Number) int {
+ return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size
+}
+
+// AppendVarint appends v to b as a varint-encoded uint64.
+func AppendVarint(b []byte, v uint64) []byte {
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeVarint(b []byte) (v uint64, n int) {
+ var y uint64
+ if len(b) <= 0 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])
+ if v < 0x80 {
+ return v, 1
+ }
+ v -= 0x80
+
+ if len(b) <= 1 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[1])
+ v += y << 7
+ if y < 0x80 {
+ return v, 2
+ }
+ v -= 0x80 << 7
+
+ if len(b) <= 2 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[2])
+ v += y << 14
+ if y < 0x80 {
+ return v, 3
+ }
+ v -= 0x80 << 14
+
+ if len(b) <= 3 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[3])
+ v += y << 21
+ if y < 0x80 {
+ return v, 4
+ }
+ v -= 0x80 << 21
+
+ if len(b) <= 4 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[4])
+ v += y << 28
+ if y < 0x80 {
+ return v, 5
+ }
+ v -= 0x80 << 28
+
+ if len(b) <= 5 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[5])
+ v += y << 35
+ if y < 0x80 {
+ return v, 6
+ }
+ v -= 0x80 << 35
+
+ if len(b) <= 6 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[6])
+ v += y << 42
+ if y < 0x80 {
+ return v, 7
+ }
+ v -= 0x80 << 42
+
+ if len(b) <= 7 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[7])
+ v += y << 49
+ if y < 0x80 {
+ return v, 8
+ }
+ v -= 0x80 << 49
+
+ if len(b) <= 8 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[8])
+ v += y << 56
+ if y < 0x80 {
+ return v, 9
+ }
+ v -= 0x80 << 56
+
+ if len(b) <= 9 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[9])
+ v += y << 63
+ if y < 2 {
+ return v, 10
+ }
+ return 0, errCodeOverflow
+}
+
+// SizeVarint returns the encoded size of a varint.
+// The size is guaranteed to be within 1 and 10, inclusive.
+func SizeVarint(v uint64) int {
+ // This computes 1 + (bits.Len64(v)-1)/7.
+ // 9/64 is a good enough approximation of 1/7
+ return int(9*uint32(bits.Len64(v))+64) / 64
+}
+
+// AppendFixed32 appends v to b as a little-endian uint32.
+func AppendFixed32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+}
+
+// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed32(b []byte) (v uint32, n int) {
+ if len(b) < 4 {
+ return 0, errCodeTruncated
+ }
+ v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ return v, 4
+}
+
+// SizeFixed32 returns the encoded size of a fixed32, which is always 4.
+func SizeFixed32() int {
+ return 4
+}
+
+// AppendFixed64 appends v to b as a little-endian uint64.
+func AppendFixed64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+}
+
+// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed64(b []byte) (v uint64, n int) {
+ if len(b) < 8 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ return v, 8
+}
+
+// SizeFixed64 returns the encoded size of a fixed64, which is always 8.
+func SizeFixed64() int {
+ return 8
+}
+
+// AppendBytes appends v to b as a length-prefixed bytes value.
+func AppendBytes(b []byte, v []byte) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeBytes(b []byte) (v []byte, n int) {
+ m, n := ConsumeVarint(b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ if m > uint64(len(b[n:])) {
+ return nil, errCodeTruncated
+ }
+ return b[n:][:m], n + int(m)
+}
+
+// SizeBytes returns the encoded size of a length-prefixed bytes value,
+// given only the length.
+func SizeBytes(n int) int {
+ return SizeVarint(uint64(n)) + n
+}
+
+// AppendString appends v to b as a length-prefixed bytes value.
+func AppendString(b []byte, v string) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeString(b []byte) (v string, n int) {
+ bb, n := ConsumeBytes(b)
+ return string(bb), n
+}
+
+// AppendGroup appends v to b as group value, with a trailing end group marker.
+// The value v must not contain the end marker.
+func AppendGroup(b []byte, num Number, v []byte) []byte {
+ return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType))
+}
+
+// ConsumeGroup parses b as a group value until the trailing end group marker,
+// and verifies that the end marker matches the provided num. The value v
+// does not contain the end marker, while the length does contain the end marker.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
+ n = ConsumeFieldValue(num, StartGroupType, b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ b = b[:n]
+
+ // Truncate off end group marker, but need to handle denormalized varints.
+ // Assuming end marker is never 0 (which is always the case since
+ // EndGroupType is non-zero), we can truncate all trailing bytes where the
+ // lower 7 bits are all zero (implying that the varint is denormalized).
+ for len(b) > 0 && b[len(b)-1]&0x7f == 0 {
+ b = b[:len(b)-1]
+ }
+ b = b[:len(b)-SizeTag(num)]
+ return b, n
+}
+
+// SizeGroup returns the encoded size of a group, given only the length.
+func SizeGroup(num Number, n int) int {
+ return n + SizeTag(num)
+}
+
+// DecodeTag decodes the field Number and wire Type from its unified form.
+// The Number is -1 if the decoded field number overflows int32.
+// Other than overflow, this does not check for field number validity.
+func DecodeTag(x uint64) (Number, Type) {
+ // NOTE: MessageSet allows for larger field numbers than normal.
+ if x>>3 > uint64(math.MaxInt32) {
+ return -1, 0
+ }
+ return Number(x >> 3), Type(x & 7)
+}
+
+// EncodeTag encodes the field Number and wire Type into its unified form.
+func EncodeTag(num Number, typ Type) uint64 {
+ return uint64(num)<<3 | uint64(typ&7)
+}
+
+// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
+// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
+// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
+func DecodeZigZag(x uint64) int64 {
+ return int64(x>>1) ^ int64(x)<<63>>63
+}
+
+// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
+// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
+// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
+func EncodeZigZag(x int64) uint64 {
+ return uint64(x<<1) ^ uint64(x>>63)
+}
+
+// DecodeBool decodes a uint64 as a bool.
+// Input: { 0, 1, 2, …}
+// Output: {false, true, true, …}
+func DecodeBool(x uint64) bool {
+ return x != 0
+}
+
+// EncodeBool encodes a bool as a uint64.
+// Input: {false, true}
+// Output: { 0, 1}
+func EncodeBool(x bool) uint64 {
+ if x {
+ return 1
+ }
+ return 0
+}
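
protowire is the lowest layer of the new module: an Append/Consume pair per wire type, plus the tag, zig-zag, and bool helpers at the end of the file, with negative lengths doubling as error codes that ParseError turns back into errors. A small round-trip sketch using only functions defined above:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        // Put field number 1 on the wire as a varint holding zigzag(-3) = 5,
        // the encoding a sint64 field uses.
        var b []byte
        b = protowire.AppendTag(b, 1, protowire.VarintType)
        b = protowire.AppendVarint(b, protowire.EncodeZigZag(-3))

        // Read it back; a negative length from a Consume function is an error code.
        num, typ, n := protowire.ConsumeTag(b)
        if n < 0 {
            panic(protowire.ParseError(n))
        }
        v, m := protowire.ConsumeVarint(b[n:])
        if m < 0 {
            panic(protowire.ParseError(m))
        }
        fmt.Println(num, typ, protowire.DecodeZigZag(v)) // 1 0 -3
    }
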
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
new file mode 100644
index 000000000..e7af0fe0d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -0,0 +1,316 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descfmt provides functionality to format descriptors.
+package descfmt
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type list interface {
+ Len() int
+ pragma.DoNotImplement
+}
+
+func FormatList(s fmt.State, r rune, vs list) {
+ io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatListOpt(vs list, isRoot, allowMulti bool) string {
+ start, end := "[", "]"
+ if isRoot {
+ var name string
+ switch vs.(type) {
+ case pref.Names:
+ name = "Names"
+ case pref.FieldNumbers:
+ name = "FieldNumbers"
+ case pref.FieldRanges:
+ name = "FieldRanges"
+ case pref.EnumRanges:
+ name = "EnumRanges"
+ case pref.FileImports:
+ name = "FileImports"
+ case pref.Descriptor:
+ name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
+ }
+ start, end = name+"{", "}"
+ }
+
+ var ss []string
+ switch vs := vs.(type) {
+ case pref.Names:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldNumbers:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0]+1 == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+ ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.EnumRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0] == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+ ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FileImports:
+ for i := 0; i < vs.Len(); i++ {
+ var rs records
+ rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
+ ss = append(ss, "{"+rs.Join()+"}")
+ }
+ return start + joinStrings(ss, allowMulti) + end
+ default:
+ _, isEnumValue := vs.(pref.EnumValueDescriptors)
+ for i := 0; i < vs.Len(); i++ {
+ m := reflect.ValueOf(vs).MethodByName("Get")
+ v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
+ ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
+ }
+ return start + joinStrings(ss, allowMulti && isEnumValue) + end
+ }
+}
+
+// descriptorAccessors is a list of accessors to print for each descriptor.
+//
+// Do not print all accessors since some contain redundant information,
+// while others are pointers that we do not want to follow since the descriptor
+// is actually a cyclic graph.
+//
+// Using a list allows us to print the accessors in a sensible order.
+var descriptorAccessors = map[reflect.Type][]string{
+ reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+ reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+ reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+ reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+ reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+ reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+ reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+}
+
+func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
+ io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
+ rv := reflect.ValueOf(t)
+ rt := rv.MethodByName("ProtoType").Type().In(0)
+
+ start, end := "{", "}"
+ if isRoot {
+ start = rt.Name() + "{"
+ }
+
+ _, isFile := t.(pref.FileDescriptor)
+ rs := records{allowMulti: allowMulti}
+ if t.IsPlaceholder() {
+ if isFile {
+ rs.Append(rv, "Path", "Package", "IsPlaceholder")
+ } else {
+ rs.Append(rv, "FullName", "IsPlaceholder")
+ }
+ } else {
+ switch {
+ case isFile:
+ rs.Append(rv, "Syntax")
+ case isRoot:
+ rs.Append(rv, "Syntax", "FullName")
+ default:
+ rs.Append(rv, "Name")
+ }
+ switch t := t.(type) {
+ case pref.FieldDescriptor:
+ for _, s := range descriptorAccessors[rt] {
+ switch s {
+ case "MapKey":
+ if k := t.MapKey(); k != nil {
+ rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
+ }
+ case "MapValue":
+ if v := t.MapValue(); v != nil {
+ switch v.Kind() {
+ case pref.EnumKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
+ case pref.MessageKind, pref.GroupKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
+ default:
+ rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
+ }
+ }
+ case "ContainingOneof":
+ if od := t.ContainingOneof(); od != nil {
+ rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
+ }
+ case "ContainingMessage":
+ if t.IsExtension() {
+ rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+ }
+ case "Message":
+ if !t.IsMap() {
+ rs.Append(rv, s)
+ }
+ default:
+ rs.Append(rv, s)
+ }
+ }
+ case pref.OneofDescriptor:
+ var ss []string
+ fs := t.Fields()
+ for i := 0; i < fs.Len(); i++ {
+ ss = append(ss, string(fs.Get(i).Name()))
+ }
+ if len(ss) > 0 {
+ rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+ }
+ default:
+ rs.Append(rv, descriptorAccessors[rt]...)
+ }
+ if rv.MethodByName("GoType").IsValid() {
+ rs.Append(rv, "GoType")
+ }
+ }
+ return start + rs.Join() + end
+}
+
+type records struct {
+ recs [][2]string
+ allowMulti bool
+}
+
+func (rs *records) Append(v reflect.Value, accessors ...string) {
+ for _, a := range accessors {
+ var rv reflect.Value
+ if m := v.MethodByName(a); m.IsValid() {
+ rv = m.Call(nil)[0]
+ }
+ if v.Kind() == reflect.Struct && !rv.IsValid() {
+ rv = v.FieldByName(a)
+ }
+ if !rv.IsValid() {
+ panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
+ }
+ if _, ok := rv.Interface().(pref.Value); ok {
+ rv = rv.MethodByName("Interface").Call(nil)[0]
+ if !rv.IsNil() {
+ rv = rv.Elem()
+ }
+ }
+
+ // Ignore zero values.
+ var isZero bool
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Slice:
+ isZero = rv.IsNil()
+ case reflect.Bool:
+ isZero = rv.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ isZero = rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ isZero = rv.Uint() == 0
+ case reflect.String:
+ isZero = rv.String() == ""
+ }
+ if n, ok := rv.Interface().(list); ok {
+ isZero = n.Len() == 0
+ }
+ if isZero {
+ continue
+ }
+
+ // Format the value.
+ var s string
+ v := rv.Interface()
+ switch v := v.(type) {
+ case list:
+ s = formatListOpt(v, false, rs.allowMulti)
+ case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
+ s = string(v.(pref.Descriptor).Name())
+ case pref.Descriptor:
+ s = string(v.FullName())
+ case string:
+ s = strconv.Quote(v)
+ case []byte:
+ s = fmt.Sprintf("%q", v)
+ default:
+ s = fmt.Sprint(v)
+ }
+ rs.recs = append(rs.recs, [2]string{a, s})
+ }
+}
+
+func (rs *records) Join() string {
+ var ss []string
+
+ // In single line mode, simply join all records with commas.
+ if !rs.allowMulti {
+ for _, r := range rs.recs {
+ ss = append(ss, r[0]+formatColon(0)+r[1])
+ }
+ return joinStrings(ss, false)
+ }
+
+ // In allowMulti line mode, align single line records for more readable output.
+ var maxLen int
+ flush := func(i int) {
+ for _, r := range rs.recs[len(ss):i] {
+ ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
+ }
+ maxLen = 0
+ }
+ for i, r := range rs.recs {
+ if isMulti := strings.Contains(r[1], "\n"); isMulti {
+ flush(i)
+ ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
+ } else if maxLen < len(r[0]) {
+ maxLen = len(r[0])
+ }
+ }
+ flush(len(rs.recs))
+ return joinStrings(ss, true)
+}
+
+func formatColon(padding int) string {
+ // Deliberately introduce instability into the debug output to
+ // discourage users from performing string comparisons.
+ // This provides us flexibility to change the output in the future.
+ if detrand.Bool() {
+ return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
+ } else {
+ return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
+ }
+}
+
+func joinStrings(ss []string, isMulti bool) string {
+ if len(ss) == 0 {
+ return ""
+ }
+ if isMulti {
+ return "\n\t" + strings.Join(ss, "\n\t") + "\n"
+ }
+ return strings.Join(ss, ", ")
+}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
new file mode 100644
index 000000000..8401be8c8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descopts contains the nil pointers to concrete descriptor options.
+//
+// This package exists as a form of reverse dependency injection so that certain
+// packages (e.g., internal/filedesc and internal/filetype) can avoid a direct
+// dependency on the descriptor proto package.
+package descopts
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+// These variables are set by the init function in descriptor.pb.go via logic
+// in internal/filetype. In other words, so long as the descriptor proto package
+// is linked in, these variables will be populated.
+//
+// Each variable is populated with a nil pointer to the options struct.
+var (
+ File pref.ProtoMessage
+ Enum pref.ProtoMessage
+ EnumValue pref.ProtoMessage
+ Message pref.ProtoMessage
+ Field pref.ProtoMessage
+ Oneof pref.ProtoMessage
+ ExtensionRange pref.ProtoMessage
+ Service pref.ProtoMessage
+ Method pref.ProtoMessage
+)
diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
new file mode 100644
index 000000000..a904dd1f9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package detrand provides deterministically random functionality.
+//
+// The pseudo-randomness of these functions is seeded by the program binary
+// itself and guarantees that the output does not change within a program,
+// while ensuring that the output is unstable across different builds.
+package detrand
+
+import (
+ "encoding/binary"
+ "hash/fnv"
+ "os"
+)
+
+// Disable disables detrand such that all functions return the zero value.
+// This function is not concurrent-safe and must be called during program init.
+func Disable() {
+ randSeed = 0
+}
+
+// Bool returns a deterministically random boolean.
+func Bool() bool {
+ return randSeed%2 == 1
+}
+
+// randSeed is a best-effort at an approximate hash of the Go binary.
+var randSeed = binaryHash()
+
+func binaryHash() uint64 {
+ // Open the Go binary.
+ s, err := os.Executable()
+ if err != nil {
+ return 0
+ }
+ f, err := os.Open(s)
+ if err != nil {
+ return 0
+ }
+ defer f.Close()
+
+ // Hash the size and several samples of the Go binary.
+ const numSamples = 8
+ var buf [64]byte
+ h := fnv.New64()
+ fi, err := f.Stat()
+ if err != nil {
+ return 0
+ }
+ binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
+ h.Write(buf[:8])
+ for i := int64(0); i < numSamples; i++ {
+ if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
+ return 0
+ }
+ h.Write(buf[:])
+ }
+ return h.Sum64()
+}
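The practical consequence of the file above is that detrand's output is stable within one process but may differ between builds. A hedged, minimal sketch of that property (only compilable from inside the protobuf module, since internal packages cannot be imported externally):

package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/detrand"
)

func main() {
	// Bool is deterministic for the lifetime of this process, so repeated
	// calls always agree; only a different binary may flip the result.
	first := detrand.Bool()
	for i := 0; i < 3; i++ {
		fmt.Println(detrand.Bool() == first) // always true within one run
	}
}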
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
new file mode 100644
index 000000000..fdd9b13f2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
@@ -0,0 +1,213 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package defval marshals and unmarshals textual forms of default values.
+//
+// This package handles both the form historically used in Go struct field tags
+// and also the form used by google.protobuf.FieldDescriptorProto.default_value
+// since they differ in superficial ways.
+package defval
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+
+ ptext "google.golang.org/protobuf/internal/encoding/text"
+ errors "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Format is the serialization format used to represent the default value.
+type Format int
+
+const (
+ _ Format = iota
+
+ // Descriptor uses the serialization format that protoc uses with the
+ // google.protobuf.FieldDescriptorProto.default_value field.
+ Descriptor
+
+ // GoTag uses the historical serialization format in Go struct field tags.
+ GoTag
+)
+
+// Unmarshal deserializes the default string s according to the given kind k.
+// When k is an enum, a list of enum value descriptors must be provided.
+func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ switch s {
+ case "1":
+ return pref.ValueOfBool(true), nil, nil
+ case "0":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ } else {
+ switch s {
+ case "true":
+ return pref.ValueOfBool(true), nil, nil
+ case "false":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ // Go tags use the numeric form of the enum value.
+ if n, err := strconv.ParseInt(s, 10, 32); err == nil {
+ if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ } else {
+ // Descriptor default_value uses the enum identifier.
+ ev := evs.ByName(pref.Name(s))
+ if ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if v, err := strconv.ParseInt(s, 10, 32); err == nil {
+ return pref.ValueOfInt32(int32(v)), nil, nil
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return pref.ValueOfInt64(int64(v)), nil, nil
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if v, err := strconv.ParseUint(s, 10, 32); err == nil {
+ return pref.ValueOfUint32(uint32(v)), nil, nil
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if v, err := strconv.ParseUint(s, 10, 64); err == nil {
+ return pref.ValueOfUint64(uint64(v)), nil, nil
+ }
+ case pref.FloatKind, pref.DoubleKind:
+ var v float64
+ var err error
+ switch s {
+ case "-inf":
+ v = math.Inf(-1)
+ case "inf":
+ v = math.Inf(+1)
+ case "nan":
+ v = math.NaN()
+ default:
+ v, err = strconv.ParseFloat(s, 64)
+ }
+ if err == nil {
+ if k == pref.FloatKind {
+ return pref.ValueOfFloat32(float32(v)), nil, nil
+ } else {
+ return pref.ValueOfFloat64(float64(v)), nil, nil
+ }
+ }
+ case pref.StringKind:
+ // String values are already unescaped and can be used as is.
+ return pref.ValueOfString(s), nil, nil
+ case pref.BytesKind:
+ if b, ok := unmarshalBytes(s); ok {
+ return pref.ValueOfBytes(b), nil, nil
+ }
+ }
+ return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
+}
+
+// Marshal serializes v as the default string according to the given kind k.
+// When specifying the Descriptor format for an enum kind, the associated
+// enum value descriptor must be provided.
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ if v.Bool() {
+ return "1", nil
+ } else {
+ return "0", nil
+ }
+ } else {
+ if v.Bool() {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ return strconv.FormatInt(int64(v.Enum()), 10), nil
+ } else {
+ return string(ev.Name()), nil
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return strconv.FormatInt(v.Int(), 10), nil
+ case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
+ return strconv.FormatUint(v.Uint(), 10), nil
+ case pref.FloatKind, pref.DoubleKind:
+ f := v.Float()
+ switch {
+ case math.IsInf(f, -1):
+ return "-inf", nil
+ case math.IsInf(f, +1):
+ return "inf", nil
+ case math.IsNaN(f):
+ return "nan", nil
+ default:
+ if k == pref.FloatKind {
+ return strconv.FormatFloat(f, 'g', -1, 32), nil
+ } else {
+ return strconv.FormatFloat(f, 'g', -1, 64), nil
+ }
+ }
+ case pref.StringKind:
+ // String values are serialized as is without any escaping.
+ return v.String(), nil
+ case pref.BytesKind:
+ if s, ok := marshalBytes(v.Bytes()); ok {
+ return s, nil
+ }
+ }
+ return "", errors.New("could not format value for %v: %v", k, v)
+}
+
+// unmarshalBytes deserializes bytes by applying C unescaping.
+func unmarshalBytes(s string) ([]byte, bool) {
+ // Bytes values use the same escaping as the text format,
+ // however they lack the surrounding double quotes.
+ v, err := ptext.UnmarshalString(`"` + s + `"`)
+ if err != nil {
+ return nil, false
+ }
+ return []byte(v), true
+}
+
+// marshalBytes serializes bytes by using C escaping.
+// To match the exact output of protoc, this is identical to the
+// CEscape function in strutil.cc of the protoc source code.
+func marshalBytes(b []byte) (string, bool) {
+ var s []byte
+ for _, c := range b {
+ switch c {
+ case '\n':
+ s = append(s, `\n`...)
+ case '\r':
+ s = append(s, `\r`...)
+ case '\t':
+ s = append(s, `\t`...)
+ case '"':
+ s = append(s, `\"`...)
+ case '\'':
+ s = append(s, `\'`...)
+ case '\\':
+ s = append(s, `\\`...)
+ default:
+ if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII {
+ s = append(s, c)
+ } else {
+ s = append(s, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ return string(s), true
+}
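As a quick illustration of the API added above, the sketch below parses a textual default value and re-serializes it in the Go struct-tag form. It is a hedged example only: the int32 kind and the value "42" are arbitrary, and the internal import compiles only from within the protobuf module.

package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/encoding/defval"
	pref "google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Parse the textual default "42" for an int32 field, as protoc records
	// it in FieldDescriptorProto.default_value.
	v, _, err := defval.Unmarshal("42", pref.Int32Kind, nil, defval.Descriptor)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Int()) // 42

	// Round-trip it back into the historical Go struct-tag ("def=") form.
	s, err := defval.Marshal(v, nil, pref.Int32Kind, defval.GoTag)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // 42
}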
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
new file mode 100644
index 000000000..b1eeea507
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package messageset encodes and decodes the obsolete MessageSet wire format.
+package messageset
+
+import (
+ "math"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// The MessageSet wire format is equivalent to a message defined as follows,
+// where each Item defines an extension field with a field number of 'type_id'
+// and content of 'message'. MessageSet extensions must be non-repeated message
+// fields.
+//
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// }
+// }
+const (
+ FieldItem = protowire.Number(1)
+ FieldTypeID = protowire.Number(2)
+ FieldMessage = protowire.Number(3)
+)
+
+// ExtensionName is the field name for extensions of MessageSet.
+//
+// A valid MessageSet extension must be of the form:
+// message MyMessage {
+// extend proto2.bridge.MessageSet {
+// optional MyMessage message_set_extension = 1234;
+// }
+// ...
+// }
+const ExtensionName = "message_set_extension"
+
+// IsMessageSet returns whether the message uses the MessageSet wire format.
+func IsMessageSet(md pref.MessageDescriptor) bool {
+ xmd, ok := md.(interface{ IsMessageSet() bool })
+ return ok && xmd.IsMessageSet()
+}
+
+// IsMessageSetExtension reports whether this field extends a MessageSet.
+func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
+ if fd.Name() != ExtensionName {
+ return false
+ }
+ if fd.FullName().Parent() != fd.Message().FullName() {
+ return false
+ }
+ return IsMessageSet(fd.ContainingMessage())
+}
+
+// FindMessageSetExtension locates a MessageSet extension field by name.
+// In text and JSON formats, the extension name used is the message itself.
+// The extension field name is derived by appending ExtensionName.
+func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) {
+ name := s.Append(ExtensionName)
+ xt, err := r.FindExtensionByName(name)
+ if err != nil {
+ if err == preg.NotFound {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "%q", name)
+ }
+ if !IsMessageSetExtension(xt.TypeDescriptor()) {
+ return nil, preg.NotFound
+ }
+ return xt, nil
+}
+
+// SizeField returns the size of a MessageSet item field containing an extension
+// with the given field number, not counting the contents of the message subfield.
+func SizeField(num protowire.Number) int {
+ return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num))
+}
+
+// Unmarshal parses a MessageSet.
+//
+// It calls fn with the type ID and value of each item in the MessageSet.
+// Unknown fields are discarded.
+//
+// If wantLen is true, the item values include the varint length prefix.
+// This is ugly, but simplifies the fast-path decoder in internal/impl.
+func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error {
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ if num != FieldItem || wtyp != protowire.StartGroupType {
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ continue
+ }
+ typeID, value, n, err := ConsumeFieldValue(b, wantLen)
+ if err != nil {
+ return err
+ }
+ b = b[n:]
+ if typeID == 0 {
+ continue
+ }
+ if err := fn(typeID, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConsumeFieldValue parses b as a MessageSet item field value until and including
+// the trailing end group marker. It assumes the start group tag has already been parsed.
+// It returns the contents of the type_id and message subfields and the total
+// item length.
+//
+// If wantLen is true, the returned message value includes the length prefix.
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) {
+ ilen := len(b)
+ for {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ switch {
+ case num == FieldItem && wtyp == protowire.EndGroupType:
+ if wantLen && len(message) == 0 {
+ // The message field was missing, which should never happen.
+ // Be prepared for this case anyway.
+ message = protowire.AppendVarint(message, 0)
+ }
+ return typeid, message, ilen - len(b), nil
+ case num == FieldTypeID && wtyp == protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ if v < 1 || v > math.MaxInt32 {
+ return 0, nil, 0, errors.New("invalid type_id in message set")
+ }
+ typeid = protowire.Number(v)
+ case num == FieldMessage && wtyp == protowire.BytesType:
+ m, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ if message == nil {
+ if wantLen {
+ message = b[:n:n]
+ } else {
+ message = m[:len(m):len(m)]
+ }
+ } else {
+ // This case should never happen in practice, but handle it for
+ // correctness: The MessageSet item contains multiple message
+ // fields, which need to be merged.
+ //
+ // In the case where we're returning the length, this becomes
+ // quite inefficient since we need to strip the length off
+ // the existing data and reconstruct it with the combined length.
+ if wantLen {
+ _, nn := protowire.ConsumeVarint(message)
+ m0 := message[nn:]
+ message = nil
+ message = protowire.AppendVarint(message, uint64(len(m0)+len(m)))
+ message = append(message, m0...)
+ message = append(message, m...)
+ } else {
+ message = append(message, m...)
+ }
+ }
+ b = b[n:]
+ default:
+ // We have no place to put it, so we just ignore unknown fields.
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ }
+ }
+}
+
+// AppendFieldStart appends the start of a MessageSet item field containing
+// an extension with the given number. The caller must add the message
+// subfield (including the tag).
+func AppendFieldStart(b []byte, num protowire.Number) []byte {
+ b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType)
+ b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType)
+ b = protowire.AppendVarint(b, uint64(num))
+ return b
+}
+
+// AppendFieldEnd appends the trailing end group marker for a MessageSet item field.
+func AppendFieldEnd(b []byte) []byte {
+ return protowire.AppendTag(b, FieldItem, protowire.EndGroupType)
+}
+
+// SizeUnknown returns the size of an unknown fields section in MessageSet format.
+//
+// See AppendUnknown.
+func SizeUnknown(unknown []byte) (size int) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return 0
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return 0
+ }
+ unknown = unknown[n:]
+ size += SizeField(num) + protowire.SizeTag(FieldMessage) + n
+ }
+ return size
+}
+
+// AppendUnknown appends unknown fields to b in MessageSet format.
+//
+// For historic reasons, unresolved items in a MessageSet are stored in a
+// message's unknown fields section in non-MessageSet format. That is, an
+// unknown item with typeID T and value V appears in the unknown fields as
+// a field with number T and value V.
+//
+// This function converts the unknown fields back into MessageSet form.
+func AppendUnknown(b, unknown []byte) ([]byte, error) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ b = AppendFieldStart(b, num)
+ b = protowire.AppendTag(b, FieldMessage, protowire.BytesType)
+ b = append(b, unknown[:n]...)
+ b = AppendFieldEnd(b)
+ unknown = unknown[n:]
+ }
+ return b, nil
+}
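To make the item framing concrete, here is a hedged sketch of how a caller is expected to combine AppendFieldStart/AppendFieldEnd with the public protowire package to emit a single MessageSet item. The type ID 12345 and the payload bytes are placeholders, and the messageset import is module-internal.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/internal/encoding/messageset"
)

func main() {
	payload := []byte{0x08, 0x01} // already-encoded extension message bytes
	var b []byte
	// Item start group plus the type_id varint subfield.
	b = messageset.AppendFieldStart(b, 12345)
	// The caller supplies the message subfield, tag included.
	b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)
	// Close the item group.
	b = messageset.AppendFieldEnd(b)
	fmt.Printf("% x\n", b)
}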
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
new file mode 100644
index 000000000..16c02d7b6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag marshals and unmarshals the legacy struct tags as generated
+// by historical versions of protoc-gen-go.
+package tag
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ defval "google.golang.org/protobuf/internal/encoding/defval"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var byteType = reflect.TypeOf(byte(0))
+
+// Unmarshal decodes the tag into a prototype.Field.
+//
+// The goType is needed to determine the original protoreflect.Kind since the
+// tag does not record sufficient information to determine that.
+// The type is the underlying field type (e.g., a repeated field may be
+// represented by []T, but the Go type passed in is just T).
+// A list of enum value descriptors must be provided for enum fields.
+// This does not populate the Enum or Message (except for weak message).
+//
+// This function is a best effort attempt; parsing errors are ignored.
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
+ f := new(fdesc.Field)
+ f.L0.ParentFile = fdesc.SurrogateProto2
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ f.L0.FullName = pref.FullName(s[len("name="):])
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ f.L1.Number = pref.FieldNumber(n)
+ case s == "opt":
+ f.L1.Cardinality = pref.Optional
+ case s == "req":
+ f.L1.Cardinality = pref.Required
+ case s == "rep":
+ f.L1.Cardinality = pref.Repeated
+ case s == "varint":
+ switch goType.Kind() {
+ case reflect.Bool:
+ f.L1.Kind = pref.BoolKind
+ case reflect.Int32:
+ f.L1.Kind = pref.Int32Kind
+ case reflect.Int64:
+ f.L1.Kind = pref.Int64Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Uint32Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Uint64Kind
+ }
+ case s == "zigzag32":
+ if goType.Kind() == reflect.Int32 {
+ f.L1.Kind = pref.Sint32Kind
+ }
+ case s == "zigzag64":
+ if goType.Kind() == reflect.Int64 {
+ f.L1.Kind = pref.Sint64Kind
+ }
+ case s == "fixed32":
+ switch goType.Kind() {
+ case reflect.Int32:
+ f.L1.Kind = pref.Sfixed32Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Fixed32Kind
+ case reflect.Float32:
+ f.L1.Kind = pref.FloatKind
+ }
+ case s == "fixed64":
+ switch goType.Kind() {
+ case reflect.Int64:
+ f.L1.Kind = pref.Sfixed64Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Fixed64Kind
+ case reflect.Float64:
+ f.L1.Kind = pref.DoubleKind
+ }
+ case s == "bytes":
+ switch {
+ case goType.Kind() == reflect.String:
+ f.L1.Kind = pref.StringKind
+ case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
+ f.L1.Kind = pref.BytesKind
+ default:
+ f.L1.Kind = pref.MessageKind
+ }
+ case s == "group":
+ f.L1.Kind = pref.GroupKind
+ case strings.HasPrefix(s, "enum="):
+ f.L1.Kind = pref.EnumKind
+ case strings.HasPrefix(s, "json="):
+ jsonName := s[len("json="):]
+ if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
+ f.L1.JSONName.Init(jsonName)
+ }
+ case s == "packed":
+ f.L1.HasPacked = true
+ f.L1.IsPacked = true
+ case strings.HasPrefix(s, "weak="):
+ f.L1.IsWeak = true
+ f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ s, i = tag[len("def="):], len(tag)
+ v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
+ f.L1.Default = fdesc.DefaultValue(v, ev)
+ case s == "proto3":
+ f.L0.ParentFile = fdesc.SurrogateProto3
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+
+ // The generator uses the group message name instead of the field name.
+ // We obtain the real field name by lowercasing the group name.
+ if f.L1.Kind == pref.GroupKind {
+ f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
+ }
+ return f
+}
+
+// Marshal encodes the protoreflect.FieldDescriptor as a tag.
+//
+// The enumName must be provided if the kind is an enum.
+// Historically, the formulation of the enum "name" was the proto package
+// dot-concatenated with the generated Go identifier for the enum type.
+// Depending on the context in which Marshal is called, there are different ways
+// through which that information is determined. As such it is the caller's
+// responsibility to provide a function to obtain that information.
+func Marshal(fd pref.FieldDescriptor, enumName string) string {
+ var tag []string
+ switch fd.Kind() {
+ case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
+ tag = append(tag, "varint")
+ case pref.Sint32Kind:
+ tag = append(tag, "zigzag32")
+ case pref.Sint64Kind:
+ tag = append(tag, "zigzag64")
+ case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
+ tag = append(tag, "fixed32")
+ case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
+ tag = append(tag, "fixed64")
+ case pref.StringKind, pref.BytesKind, pref.MessageKind:
+ tag = append(tag, "bytes")
+ case pref.GroupKind:
+ tag = append(tag, "group")
+ }
+ tag = append(tag, strconv.Itoa(int(fd.Number())))
+ switch fd.Cardinality() {
+ case pref.Optional:
+ tag = append(tag, "opt")
+ case pref.Required:
+ tag = append(tag, "req")
+ case pref.Repeated:
+ tag = append(tag, "rep")
+ }
+ if fd.IsPacked() {
+ tag = append(tag, "packed")
+ }
+ name := string(fd.Name())
+ if fd.Kind() == pref.GroupKind {
+ // The name of the FieldDescriptor for a group field is
+ // lowercased. To find the original capitalization, we
+ // look in the field's MessageType.
+ name = string(fd.Message().Name())
+ }
+ tag = append(tag, "name="+name)
+ if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() {
+ // NOTE: The jsonName != name condition is suspect, but it preserves
+ // the exact same semantics from the previous generator.
+ tag = append(tag, "json="+jsonName)
+ }
+ if fd.IsWeak() {
+ tag = append(tag, "weak="+string(fd.Message().FullName()))
+ }
+ // The previous implementation does not tag extension fields as proto3,
+ // even when the field is defined in a proto3 file. Match that behavior
+ // for consistency.
+ if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
+ tag = append(tag, "proto3")
+ }
+ if fd.Kind() == pref.EnumKind && enumName != "" {
+ tag = append(tag, "enum="+enumName)
+ }
+ if fd.ContainingOneof() != nil {
+ tag = append(tag, "oneof")
+ }
+ // This must appear last in the tag, since commas in strings aren't escaped.
+ if fd.HasDefault() {
+ def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag)
+ tag = append(tag, "def="+def)
+ }
+ return strings.Join(tag, ",")
+}
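A hedged sketch of the round trip this package exists for, using a made-up legacy struct tag for an optional proto3 int32 field (again only compilable from inside the module; the field name is invented):

package main

import (
	"fmt"
	"reflect"

	"google.golang.org/protobuf/internal/encoding/tag"
)

func main() {
	// A tag string of the shape historical protoc-gen-go emitted.
	const legacy = "varint,2,opt,name=page_number,json=pageNumber,proto3"
	fd := tag.Unmarshal(legacy, reflect.TypeOf(int32(0)), nil)
	fmt.Println(fd.Number(), fd.Kind(), fd.Cardinality()) // 2 int32 optional

	// Marshal reconstructs an equivalent tag from the descriptor.
	fmt.Println(tag.Marshal(fd, ""))
}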
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
new file mode 100644
index 000000000..eb10ea102
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -0,0 +1,665 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Decoder is a token-based textproto decoder.
+type Decoder struct {
+ // lastCall is last method called, either readCall or peekCall.
+ // Initial value is readCall.
+ lastCall call
+
+ // lastToken contains the last read token.
+ lastToken Token
+
+ // lastErr contains the last read error.
+ lastErr error
+
+ // openStack is a stack containing the byte characters for MessageOpen and
+ // ListOpen kinds. The top of stack represents the message or the list that
+ // the current token is nested in. An empty stack means the current token is
+ // at the top level message. The characters '{' and '<' both represent the
+ // MessageOpen kind.
+ openStack []byte
+
+ // orig is used in reporting line and column.
+ orig []byte
+ // in contains the unconsumed input.
+ in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+ return &Decoder{orig: b, in: b}
+}
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+ readCall call = iota
+ peekCall
+)
+
+// Peek looks ahead and returns the next token and error without advancing a read.
+func (d *Decoder) Peek() (Token, error) {
+ defer func() { d.lastCall = peekCall }()
+ if d.lastCall == readCall {
+ d.lastToken, d.lastErr = d.Read()
+ }
+ return d.lastToken, d.lastErr
+}
+
+// Read returns the next token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+ defer func() { d.lastCall = readCall }()
+ if d.lastCall == peekCall {
+ return d.lastToken, d.lastErr
+ }
+
+ tok, err := d.parseNext(d.lastToken.kind)
+ if err != nil {
+ return Token{}, err
+ }
+
+ switch tok.kind {
+ case comma, semicolon:
+ tok, err = d.parseNext(tok.kind)
+ if err != nil {
+ return Token{}, err
+ }
+ }
+ d.lastToken = tok
+ return tok, nil
+}
+
+const (
+ mismatchedFmt = "mismatched close character %q"
+ unexpectedFmt = "unexpected character %q"
+)
+
+// parseNext parses the next Token based on given last kind.
+func (d *Decoder) parseNext(lastKind Kind) (Token, error) {
+ // Trim leading spaces.
+ d.consume(0)
+ isEOF := false
+ if len(d.in) == 0 {
+ isEOF = true
+ }
+
+ switch lastKind {
+ case EOF:
+ return d.consumeToken(EOF, 0, 0), nil
+
+ case bof:
+ // Start of top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case Name:
+ // Next token can be MessageOpen, ListOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ case '[':
+ d.pushOpenStack(ch)
+ return d.consumeToken(ListOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case Scalar:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch d.in[0] {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ _, closeCh := d.currentOpenKind()
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case ListOpen:
+ // Next token can be ListClose, MessageStart or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case ListClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ default:
+ // It is not possible to have this case. Let it panic below.
+ }
+
+ case comma, semicolon:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ if lastKind == semicolon {
+ // It is not possible to have this case as logic here
+ // should not have produced a semicolon Token when inside a
+ // list. Let it panic below.
+ break
+ }
+ // Next token can be MessageOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+ }
+ }
+
+ line, column := d.Position(len(d.orig) - len(d.in))
+ panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind))
+}
+
+var otherCloseChar = map[byte]byte{
+ '}': '>',
+ '>': '}',
+}
+
+// currentOpenKind indicates whether current position is inside a message, list
+// or top-level message by returning MessageOpen, ListOpen or bof respectively.
+// If the returned kind is either a MessageOpen or ListOpen, it also returns the
+// corresponding closing character.
+func (d *Decoder) currentOpenKind() (Kind, byte) {
+ if len(d.openStack) == 0 {
+ return bof, 0
+ }
+ openCh := d.openStack[len(d.openStack)-1]
+ switch openCh {
+ case '{':
+ return MessageOpen, '}'
+ case '<':
+ return MessageOpen, '>'
+ case '[':
+ return ListOpen, ']'
+ }
+ panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh)))
+}
+
+func (d *Decoder) pushOpenStack(ch byte) {
+ d.openStack = append(d.openStack, ch)
+}
+
+func (d *Decoder) popOpenStack() {
+ d.openStack = d.openStack[:len(d.openStack)-1]
+}
+
+// parseFieldName parses field name and separator.
+func (d *Decoder) parseFieldName() (tok Token, err error) {
+ defer func() {
+ if err == nil && d.tryConsumeChar(':') {
+ tok.attrs |= hasSeparator
+ }
+ }()
+
+ // Extension or Any type URL.
+ if d.in[0] == '[' {
+ return d.parseTypeName()
+ }
+
+ // Identifier.
+ if size := parseIdent(d.in, false); size > 0 {
+ return d.consumeToken(Name, size, uint8(IdentName)), nil
+ }
+
+ // Field number. Identify if input is a valid number that is not negative
+ // and is decimal integer within 32-bit range.
+ if num := parseNumber(d.in); num.size > 0 {
+ if !num.neg && num.kind == numDec {
+ if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
+ return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
+ }
+ }
+ return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
+ }
+
+ return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in))
+}
+
+// parseTypeName parses Any type URL or extension field name. The name is
+// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
+// strings. This implementation is more liberal and allows for the pattern
+// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*. Whitespaces and comments are allowed
+// in between [ ], '.', '/' and the sub names.
+func (d *Decoder) parseTypeName() (Token, error) {
+ startPos := len(d.orig) - len(d.in)
+ // Use alias s to advance first in order to use d.in for error handling.
+ // Caller already checks for [ as first character.
+ s := consume(d.in[1:], 0)
+ if len(s) == 0 {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ var name []byte
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ var closed bool
+ for len(s) > 0 && !closed {
+ switch {
+ case s[0] == ']':
+ s = s[1:]
+ closed = true
+
+ case s[0] == '/', s[0] == '.':
+ if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ name = append(name, s[0])
+ s = s[1:]
+ s = consume(s, 0)
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ default:
+ return Token{}, d.newSyntaxError(
+ "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ }
+
+ if !closed {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ // First character cannot be '.'. Last character cannot be '.' or '/'.
+ size := len(name)
+ if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)])
+ }
+
+ d.in = s
+ endPos := len(d.orig) - len(d.in)
+ d.consume(0)
+
+ return Token{
+ kind: Name,
+ attrs: uint8(TypeName),
+ pos: startPos,
+ raw: d.orig[startPos:endPos],
+ str: string(name),
+ }, nil
+}
+
+func isTypeNameChar(b byte) bool {
+ return (b == '-' || b == '_' ||
+ ('0' <= b && b <= '9') ||
+ ('a' <= b && b <= 'z') ||
+ ('A' <= b && b <= 'Z'))
+}
+
+func isWhiteSpace(b byte) bool {
+ switch b {
+ case ' ', '\n', '\r', '\t':
+ return true
+ default:
+ return false
+ }
+}
+
+// parseIdent parses an unquoted proto identifier and returns size.
+// If allowNeg is true, it allows '-' to be the first character in the
+// identifier. This is used when parsing literal values like -infinity, etc.
+// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*`
+func parseIdent(input []byte, allowNeg bool) int {
+ var size int
+
+ s := input
+ if len(s) == 0 {
+ return 0
+ }
+
+ if allowNeg && s[0] == '-' {
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return 0
+ }
+ }
+
+ switch {
+ case s[0] == '_',
+ 'a' <= s[0] && s[0] <= 'z',
+ 'A' <= s[0] && s[0] <= 'Z':
+ s = s[1:]
+ size++
+ default:
+ return 0
+ }
+
+ for len(s) > 0 && (s[0] == '_' ||
+ 'a' <= s[0] && s[0] <= 'z' ||
+ 'A' <= s[0] && s[0] <= 'Z' ||
+ '0' <= s[0] && s[0] <= '9') {
+ s = s[1:]
+ size++
+ }
+
+ if len(s) > 0 && !isDelim(s[0]) {
+ return 0
+ }
+
+ return size
+}
+
+// parseScalar parses for a string, literal or number value.
+func (d *Decoder) parseScalar() (Token, error) {
+ if d.in[0] == '"' || d.in[0] == '\'' {
+ return d.parseStringValue()
+ }
+
+ if tok, ok := d.parseLiteralValue(); ok {
+ return tok, nil
+ }
+
+ if tok, ok := d.parseNumberValue(); ok {
+ return tok, nil
+ }
+
+ return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in))
+}
+
+// parseLiteralValue parses a literal value. A literal value is used for
+// bools, special floats and enums. This function simply identifies that the
+// field value is a literal.
+func (d *Decoder) parseLiteralValue() (Token, bool) {
+ size := parseIdent(d.in, true)
+ if size == 0 {
+ return Token{}, false
+ }
+ return d.consumeToken(Scalar, size, literalValue), true
+}
+
+// consumeToken constructs a Token for given Kind from d.in and consumes given
+// size-length from it.
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
+ // Important to compute raw and pos before consuming.
+ tok := Token{
+ kind: kind,
+ attrs: attrs,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:size],
+ }
+ d.consume(size)
+ return tok
+}
+
+// newSyntaxError returns a syntax error with line and column information for
+// current position.
+func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+ e := errors.New(f, x...)
+ line, column := d.Position(len(d.orig) - len(d.in))
+ return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns line and column number of given index of the original input.
+// It will panic if index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+ b := d.orig[:idx]
+ line = bytes.Count(b, []byte("\n")) + 1
+ if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+ b = b[i+1:]
+ }
+ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+ return line, column
+}
+
+func (d *Decoder) tryConsumeChar(c byte) bool {
+ if len(d.in) > 0 && d.in[0] == c {
+ d.consume(1)
+ return true
+ }
+ return false
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func (d *Decoder) consume(n int) {
+ d.in = consume(d.in, n)
+ return
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func consume(b []byte, n int) []byte {
+ b = b[n:]
+ for len(b) > 0 {
+ switch b[0] {
+ case ' ', '\n', '\r', '\t':
+ b = b[1:]
+ case '#':
+ if i := bytes.IndexByte(b, '\n'); i >= 0 {
+ b = b[i+len("\n"):]
+ } else {
+ b = nil
+ }
+ default:
+ return b
+ }
+ }
+ return b
+}
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`)
+
+// isDelim returns true if given byte is a delimiter character.
+func isDelim(c byte) bool {
+ return !(c == '-' || c == '+' || c == '.' || c == '_' ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ ('0' <= c && c <= '9'))
+}
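The decoder above is token-based rather than tree-based. A hedged sketch of walking a small textproto input with it (the input is arbitrary and the import is module-internal):

package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/encoding/text"
)

func main() {
	d := text.NewDecoder([]byte(`name: "foo" id: 42 nested { ok: true }`))
	for {
		tok, err := d.Read()
		if err != nil {
			panic(err)
		}
		if tok.Kind() == text.EOF {
			break
		}
		// Print each token kind with the raw bytes it was parsed from.
		fmt.Printf("%-8v %q\n", tok.Kind(), tok.RawString())
	}
}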
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
new file mode 100644
index 000000000..f2d90b789
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+// parseNumberValue parses a number from the input and returns a Token object.
+func (d *Decoder) parseNumberValue() (Token, bool) {
+ in := d.in
+ num := parseNumber(in)
+ if num.size == 0 {
+ return Token{}, false
+ }
+ numAttrs := num.kind
+ if num.neg {
+ numAttrs |= isNegative
+ }
+ strSize := num.size
+ last := num.size - 1
+ if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
+ strSize = last
+ }
+ tok := Token{
+ kind: Scalar,
+ attrs: numberValue,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:num.size],
+ str: string(d.in[:strSize]),
+ numAttrs: numAttrs,
+ }
+ d.consume(num.size)
+ return tok, true
+}
+
+const (
+ numDec uint8 = (1 << iota) / 2
+ numHex
+ numOct
+ numFloat
+)
+
+// number is the result of parsing out a valid number from parseNumber. It
+// contains data for doing float or integer conversion via the strconv package
+// in conjunction with the input bytes.
+type number struct {
+ kind uint8
+ neg bool
+ size int
+}
+
+// parseNumber constructs a number object from given input. It allows for the
+// following patterns:
+// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
+// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
+// It also returns the number of parsed bytes for the given number, 0 if it is
+// not a number.
+func parseNumber(input []byte) number {
+ kind := numDec
+ var size int
+ var neg bool
+
+ s := input
+ if len(s) == 0 {
+ return number{}
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+
+ // C++ allows for whitespace and comments in between the negative sign and
+ // the rest of the number. This logic currently does not but is consistent
+ // with v1.
+
+ switch {
+ case s[0] == '0':
+ if len(s) > 1 {
+ switch {
+ case s[1] == 'x' || s[1] == 'X':
+ // Parse as hex number.
+ kind = numHex
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') ||
+ ('a' <= s[0] && s[0] <= 'f') ||
+ ('A' <= s[0] && s[0] <= 'F')) {
+ s = s[1:]
+ n++
+ }
+ if n == 2 {
+ return number{}
+ }
+ size += n
+
+ case '0' <= s[1] && s[1] <= '7':
+ // Parse as octal number.
+ kind = numOct
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '7' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ if kind&(numHex|numOct) > 0 {
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+ return number{kind: kind, neg: neg, size: size}
+ }
+ }
+ s = s[1:]
+ size++
+
+ case '1' <= s[0] && s[0] <= '9':
+ n := 1
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+
+ case s[0] == '.':
+ // Set kind to numFloat to signify the intent to parse as a float, and
+ // that it needs to have other digits after '.'.
+ kind = numFloat
+
+ default:
+ return number{}
+ }
+
+ // . followed by 0 or more digits.
+ if len(s) > 0 && s[0] == '.' {
+ n := 1
+ s = s[1:]
+ // If decimal point was before any digits, it should be followed by
+ // other digits.
+ if len(s) == 0 && kind == numFloat {
+ return number{}
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ kind = numFloat
+ }
+
+ // e or E followed by an optional - or + and 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ kind = numFloat
+ s = s[1:]
+ n := 1
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ // Optional suffix f or F for floats.
+ if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') {
+ kind = numFloat
+ s = s[1:]
+ size++
+ }
+
+ // Check that next byte is a delimiter or it is at the end.
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+
+ return number{kind: kind, neg: neg, size: size}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
new file mode 100644
index 000000000..d4d349023
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
@@ -0,0 +1,161 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/strs"
+)
+
+// parseStringValue parses string field token.
+// This differs from parseString since the text format allows
+// multiple back-to-back string literals where they are semantically treated
+// as a single large string with all values concatenated.
+//
+// E.g., `"foo" "bar" "baz"` => "foobarbaz"
+func (d *Decoder) parseStringValue() (Token, error) {
+ // Note that the ending quote is sufficient to unambiguously mark the end
+ // of a string. Thus, the text grammar does not require intervening
+ // whitespace or control characters in-between strings.
+ // Thus, the following is valid:
+ // `"foo"'bar'"baz"` => "foobarbaz"
+ in0 := d.in
+ var ss []string
+ for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') {
+ s, err := d.parseString()
+ if err != nil {
+ return Token{}, err
+ }
+ ss = append(ss, s)
+ }
+ // d.in already points to the end of the value at this point.
+ return Token{
+ kind: Scalar,
+ attrs: stringValue,
+ pos: len(d.orig) - len(in0),
+ raw: in0[:len(in0)-len(d.in)],
+ str: strings.Join(ss, ""),
+ }, nil
+}
+
+// parseString parses a string value enclosed in " or '.
+func (d *Decoder) parseString() (string, error) {
+ in := d.in
+ if len(in) == 0 {
+ return "", ErrUnexpectedEOF
+ }
+ quote := in[0]
+ in = in[1:]
+ i := indexNeedEscapeInBytes(in)
+ in, out := in[i:], in[:i:i] // set cap to prevent mutations
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRune(in); {
+ case r == utf8.RuneError && n == 1:
+ return "", d.newSyntaxError("invalid UTF-8 detected")
+ case r == 0 || r == '\n':
+ return "", d.newSyntaxError("invalid character %q in string", r)
+ case r == rune(quote):
+ in = in[1:]
+ d.consume(len(d.in) - len(in))
+ return string(out), nil
+ case r == '\\':
+ if len(in) < 2 {
+ return "", ErrUnexpectedEOF
+ }
+ switch r := in[1]; r {
+ case '"', '\'', '\\', '?':
+ in, out = in[2:], append(out, r)
+ case 'a':
+ in, out = in[2:], append(out, '\a')
+ case 'b':
+ in, out = in[2:], append(out, '\b')
+ case 'n':
+ in, out = in[2:], append(out, '\n')
+ case 'r':
+ in, out = in[2:], append(out, '\r')
+ case 't':
+ in, out = in[2:], append(out, '\t')
+ case 'v':
+ in, out = in[2:], append(out, '\v')
+ case 'f':
+ in, out = in[2:], append(out, '\f')
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // One, two, or three octal characters.
+ n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n])
+ }
+ in, out = in[1+n:], append(out, byte(v))
+ case 'x':
+ // One or two hexadecimal characters.
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
+ if n > 2 {
+ n = 2
+ }
+ v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
+ }
+ in, out = in[2+n:], append(out, byte(v))
+ case 'u', 'U':
+ // Four or eight hexadecimal characters
+ n := 6
+ if r == 'U' {
+ n = 10
+ }
+ if len(in) < n {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
+ if utf8.MaxRune < v || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
+ }
+ in = in[n:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInBytes returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
+
+// UnmarshalString returns an unescaped string given a textproto string value.
+// String value needs to contain single or double quotes. This is only used by
+// internal/encoding/defval package for unmarshaling bytes.
+func UnmarshalString(s string) (string, error) {
+ d := NewDecoder([]byte(s))
+ return d.parseString()
+}
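UnmarshalString is the small hook the defval package uses for bytes defaults. A hedged usage sketch follows; the escaped literal is arbitrary, and the input must carry its surrounding quotes:

package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/encoding/text"
)

func main() {
	// Tab, newline, hex, and octal escapes are all resolved.
	s, err := text.UnmarshalString(`"col1\tcol2\n\x41\102"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", s) // "col1\tcol2\nAB"
}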
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 000000000..83d2b0d5a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+ Invalid Kind = iota
+ EOF
+ Name // Name indicates the field name.
+ Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+ MessageOpen
+ MessageClose
+ ListOpen
+ ListClose
+
+ // comma and semi-colon are only for parsing in between values and should not be exposed.
+ comma
+ semicolon
+
+ // bof indicates beginning of file, which is the default token
+ // kind at the beginning of parsing.
+ bof = Invalid
+)
+
+func (t Kind) String() string {
+ switch t {
+ case Invalid:
+ return "<invalid>"
+ case EOF:
+ return "eof"
+ case Scalar:
+ return "scalar"
+ case Name:
+ return "name"
+ case MessageOpen:
+ return "{"
+ case MessageClose:
+ return "}"
+ case ListOpen:
+ return "["
+ case ListClose:
+ return "]"
+ case comma:
+ return ","
+ case semicolon:
+ return ";"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+ IdentName NameKind = iota + 1
+ TypeName
+ FieldNumber
+)
+
+func (t NameKind) String() string {
+ switch t {
+ case IdentName:
+ return "IdentName"
+ case TypeName:
+ return "TypeName"
+ case FieldNumber:
+ return "FieldNumber"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by a separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+ numberValue = iota + 1
+ stringValue
+ literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+ // Kind of the Token object.
+ kind Kind
+ // attrs contains metadata for the following Kinds:
+ // Name: hasSeparator bit and one of NameKind.
+ // Scalar: one of numberValue, stringValue, literalValue.
+ attrs uint8
+ // numAttrs contains metadata for numberValue:
+ // - highest bit is whether negative or positive.
+ // - lower bits indicate one of numDec, numHex, numOct, numFloat.
+ numAttrs uint8
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // str contains parsed string for the following:
+ // - stringValue of Scalar kind
+ // - numberValue of Scalar kind
+ // - TypeName of Name kind
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the raw read value as a string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// NameKind returns IdentName, TypeName or FieldNumber.
+// It panics if type is not Name.
+func (t Token) NameKind() NameKind {
+ if t.kind == Name {
+ return NameKind(t.attrs &^ hasSeparator)
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// HasSeparator returns true if the field name is followed by the separator char
+// ':', else false. It panics if type is not Name.
+func (t Token) HasSeparator() bool {
+ if t.kind == Name {
+ return t.attrs&hasSeparator != 0
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// IdentName returns the value for IdentName type.
+func (t Token) IdentName() string {
+ if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
+ return string(t.raw)
+ }
+ panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// TypeName returns the value for TypeName type.
+func (t Token) TypeName() string {
+ if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// FieldNumber returns the value for FieldNumber type. It returns a
+// non-negative int32 value. The caller still needs to validate that the value
+// is within the valid field number range.
+func (t Token) FieldNumber() int32 {
+ if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 {
+ panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+ }
+ // The following ParseInt call should not return an error, as it was already
+ // called successfully right before this Token was constructed.
+ num, _ := strconv.ParseInt(string(t.raw), 10, 32)
+ return int32(num)
+}
+
+// String returns the string value for a Scalar type.
+func (t Token) String() (string, bool) {
+ if t.kind != Scalar || t.attrs != stringValue {
+ return "", false
+ }
+ return t.str, true
+}
+
+// Enum returns the literal value for a Scalar type for use as enum literals.
+func (t Token) Enum() (string, bool) {
+ if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') {
+ return "", false
+ }
+ return string(t.raw), true
+}
+
+// Bool returns the bool value for a Scalar type.
+func (t Token) Bool() (bool, bool) {
+ if t.kind != Scalar {
+ return false, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if b, ok := boolLits[string(t.raw)]; ok {
+ return b, true
+ }
+ case numberValue:
+ // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01,
+ // 0x1, etc.
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err == nil {
+ switch n {
+ case 0:
+ return false, true
+ case 1:
+ return true, true
+ }
+ }
+ }
+ return false, false
+}
+
+// These exact boolean literals are the ones supported in C++.
+var boolLits = map[string]bool{
+ "t": true,
+ "true": true,
+ "True": true,
+ "f": false,
+ "false": false,
+ "False": false,
+}
+
+// Uint64 returns the uint64 value for a Scalar type.
+func (t Token) Uint64() (uint64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+}
+
+// Uint32 returns the uint32 value for a Scalar type.
+func (t Token) Uint32() (uint32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 32)
+ if err != nil {
+ return 0, false
+ }
+ return uint32(n), true
+}
+
+// Int64 returns the int64 value for a Scalar type.
+func (t Token) Int64() (int64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 64); err == nil {
+ return n, true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 64); err == nil {
+ return int64(n), true
+ }
+ }
+ return 0, false
+}
+
+// Int32 returns the int32 value for a Scalar type.
+func (t Token) Int32() (int32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ }
+ return 0, false
+}
+
+// Float64 returns the float64 value for a Scalar type.
+func (t Token) Float64() (float64, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return f, true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ return n, true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return n, true
+ }
+ }
+ return 0, false
+}
+
+// Float32 returns the float32 value for a Scalar type.
+func (t Token) Float32() (float32, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return float32(f), true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ // Overflows are treated as (-)infinity.
+ return float32(n), true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return float32(n), true
+ }
+ }
+ return 0, false
+}
+
+// These are the supported float literals; C++ permits case-insensitive
+// variants of these.
+var floatLits = map[string]float64{
+ "nan": math.NaN(),
+ "inf": math.Inf(1),
+ "infinity": math.Inf(1),
+ "-inf": math.Inf(-1),
+ "-infinity": math.Inf(-1),
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+ return x.kind == y.kind &&
+ x.attrs == y.attrs &&
+ x.numAttrs == y.numAttrs &&
+ x.pos == y.pos &&
+ bytes.Equal(x.raw, y.raw) &&
+ x.str == y.str
+}
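
A minimal sketch (not part of the diff) of the number path in Token.Bool above: strconv.ParseUint with base 0 is what makes "00", "01", "0x0", and "0x1" acceptable unsigned representations of false/true. The helper name looseBool is made up for illustration; the literal path ("true", "False", etc.) handled by boolLits is omitted here.

package main

import (
	"fmt"
	"strconv"
)

func looseBool(s string) (bool, bool) {
	// Base 0 lets ParseUint infer decimal, octal ("01"), or hex ("0x1").
	n, err := strconv.ParseUint(s, 0, 64)
	if err != nil {
		return false, false
	}
	switch n {
	case 0:
		return false, true
	case 1:
		return true, true
	}
	return false, false
}

func main() {
	for _, s := range []string{"0x1", "01", "00", "2", "true"} {
		v, ok := looseBool(s)
		fmt.Printf("%-5s -> %v %v\n", s, v, ok)
	}
}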
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
new file mode 100644
index 000000000..0ce8d6fb8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package text implements the text format for protocol buffers.
+// This package has no semantic understanding for protocol buffers and is only
+// a parser and composer for the format.
+//
+// There is no formal specification for the protobuf text format; as such, the
+// C++ implementation (see google::protobuf::TextFormat) is the reference
+// implementation of the text format.
+//
+// This package is neither a superset nor a subset of the C++ implementation.
+// This implementation permits a more liberal grammar in some cases to be
+// backwards compatible with the historical Go implementation.
+// New parsing behavior unique to Go should not be added.
+// Some grammars allowed by the C++ implementation are deliberately
+// not implemented here because they are considered a bug by the protobuf team
+// and should not be replicated.
+//
+// The Go implementation should implement a sufficient amount of the C++
+// grammar such that the default text serialization by C++ can be parsed by Go.
+// However, just because the C++ parser accepts some input does not mean that
+// the Go implementation should as well.
+//
+// The text format is almost a superset of JSON except:
+// * message keys are not quoted strings, but identifiers
+// * the top-level value must be a message without the delimiters
+package text
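
A minimal sketch (not part of the diff) of the two JSON differences listed in the package comment above: keys are unquoted identifiers and the top-level message has no surrounding delimiters. The field names are invented purely for illustration.

package main

import "fmt"

const textproto = `name: "abc" nested { count: 3 }`
const asJSON = `{"name": "abc", "nested": {"count": 3}}`

func main() {
	fmt.Println(textproto)
	fmt.Println(asJSON)
}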
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
new file mode 100644
index 000000000..c4ba1c598
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
@@ -0,0 +1,267 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "math"
+ "math/bits"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// encType represents an encoding type.
+type encType uint8
+
+const (
+ _ encType = (1 << iota) / 2
+ name
+ scalar
+ messageOpen
+ messageClose
+)
+
+// Encoder provides methods to write out textproto constructs and values. The user is
+// responsible for producing valid sequences of constructs and values.
+type Encoder struct {
+ encoderState
+
+ indent string
+ newline string // set to "\n" if len(indent) > 0
+ delims [2]byte
+ outputASCII bool
+}
+
+type encoderState struct {
+ lastType encType
+ indents []byte
+ out []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry in a List or Message
+// to be preceded by the indent and trailed by a newline.
+//
+// If delims is not the zero value, it controls the delimiter characters used
+// for messages (e.g., "{}" vs "<>").
+//
+// If outputASCII is true, strings will be serialized in such a way that
+// multi-byte UTF-8 sequences are escaped. This property ensures that the
+// overall output is ASCII (as opposed to UTF-8).
+func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
+ e := &Encoder{}
+ if len(indent) > 0 {
+ if strings.Trim(indent, " \t") != "" {
+ return nil, errors.New("indent may only be composed of space and tab characters")
+ }
+ e.indent = indent
+ e.newline = "\n"
+ }
+ switch delims {
+ case [2]byte{0, 0}:
+ e.delims = [2]byte{'{', '}'}
+ case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
+ e.delims = delims
+ default:
+ return nil, errors.New("delimiters may only be \"{}\" or \"<>\"")
+ }
+ e.outputASCII = outputASCII
+
+ return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+ return e.out
+}
+
+// StartMessage writes out the '{' or '<' symbol.
+func (e *Encoder) StartMessage() {
+ e.prepareNext(messageOpen)
+ e.out = append(e.out, e.delims[0])
+}
+
+// EndMessage writes out the '}' or '>' symbol.
+func (e *Encoder) EndMessage() {
+ e.prepareNext(messageClose)
+ e.out = append(e.out, e.delims[1])
+}
+
+// WriteName writes out the field name and the separator ':'.
+func (e *Encoder) WriteName(s string) {
+ e.prepareNext(name)
+ e.out = append(e.out, s...)
+ e.out = append(e.out, ':')
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+ if b {
+ e.WriteLiteral("true")
+ } else {
+ e.WriteLiteral("false")
+ }
+}
+
+// WriteString writes out the given string value.
+func (e *Encoder) WriteString(s string) {
+ e.prepareNext(scalar)
+ e.out = appendString(e.out, s, e.outputASCII)
+}
+
+func appendString(out []byte, in string, outputASCII bool) []byte {
+ out = append(out, '"')
+ i := indexNeedEscapeInString(in)
+ in, out = in[i:], append(out, in[:i]...)
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRuneInString(in); {
+ case r == utf8.RuneError && n == 1:
+ // We do not report invalid UTF-8 because strings in the text format
+ // are used to represent both the proto string and bytes type.
+ r = rune(in[0])
+ fallthrough
+ case r < ' ' || r == '"' || r == '\\':
+ out = append(out, '\\')
+ switch r {
+ case '"', '\\':
+ out = append(out, byte(r))
+ case '\n':
+ out = append(out, 'n')
+ case '\r':
+ out = append(out, 'r')
+ case '\t':
+ out = append(out, 't')
+ default:
+ out = append(out, 'x')
+ out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ case outputASCII && r >= utf8.RuneSelf:
+ out = append(out, '\\')
+ if r <= math.MaxUint16 {
+ out = append(out, 'u')
+ out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ } else {
+ out = append(out, 'U')
+ out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ default:
+ i := indexNeedEscapeInString(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ out = append(out, '"')
+ return out
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf {
+ return i
+ }
+ }
+ return len(s)
+}
+
+// WriteFloat writes out the given float value for given bitSize.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+ e.prepareNext(scalar)
+ e.out = appendFloat(e.out, n, bitSize)
+}
+
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+ switch {
+ case math.IsNaN(n):
+ return append(out, "nan"...)
+ case math.IsInf(n, +1):
+ return append(out, "inf"...)
+ case math.IsInf(n, -1):
+ return append(out, "-inf"...)
+ default:
+ return strconv.AppendFloat(out, n, 'g', -1, bitSize)
+ }
+}
+
+// WriteInt writes out the given signed integer value.
+func (e *Encoder) WriteInt(n int64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatInt(n, 10)...)
+}
+
+// WriteUint writes out the given unsigned integer value.
+func (e *Encoder) WriteUint(n uint64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatUint(n, 10)...)
+}
+
+// WriteLiteral writes out the given string as a literal value without quotes.
+// This is used for writing enum literal strings.
+func (e *Encoder) WriteLiteral(s string) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, s...)
+}
+
+// prepareNext adds any necessary space or indentation before the next value,
+// based on the last encType and the indent option. It also updates e.lastType
+// to next.
+func (e *Encoder) prepareNext(next encType) {
+ defer func() {
+ e.lastType = next
+ }()
+
+ // Single line.
+ if len(e.indent) == 0 {
+ // Add space after each field before the next one.
+ if e.lastType&(scalar|messageClose) != 0 && next == name {
+ e.out = append(e.out, ' ')
+ // Add a random extra space to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+ }
+ return
+ }
+
+ // Multi-line.
+ switch {
+ case e.lastType == name:
+ e.out = append(e.out, ' ')
+ // Add a random extra space after name: to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+
+ case e.lastType == messageOpen && next != messageClose:
+ e.indents = append(e.indents, e.indent...)
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+
+ case e.lastType&(scalar|messageClose) != 0:
+ if next == messageClose {
+ e.indents = e.indents[:len(e.indents)-len(e.indent)]
+ }
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+ }
+}
+
+// Snapshot returns the current snapshot for use in Reset.
+func (e *Encoder) Snapshot() encoderState {
+ return e.encoderState
+}
+
+// Reset resets the Encoder to the given encoderState from a Snapshot.
+func (e *Encoder) Reset(es encoderState) {
+ e.encoderState = es
+}
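
A minimal sketch (not part of the diff) of the float formatting rule used by appendFloat above: NaN and infinities become textproto literals, and everything else goes through strconv.AppendFloat with the 'g' format. The helper name formatFloat is made up for illustration.

package main

import (
	"fmt"
	"math"
	"strconv"
)

func formatFloat(n float64, bitSize int) string {
	switch {
	case math.IsNaN(n):
		return "nan"
	case math.IsInf(n, +1):
		return "inf"
	case math.IsInf(n, -1):
		return "-inf"
	default:
		return string(strconv.AppendFloat(nil, n, 'g', -1, bitSize))
	}
}

func main() {
	fmt.Println(formatFloat(math.NaN(), 64))   // nan
	fmt.Println(formatFloat(math.Inf(-1), 32)) // -inf
	fmt.Println(formatFloat(0.25, 64))         // 0.25
	fmt.Println(formatFloat(1e21, 64))         // 1e+21
}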
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
new file mode 100644
index 000000000..20c17b35e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors implements functions to manipulate errors.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/internal/detrand"
+)
+
+// Error is a sentinel matching all errors produced by this package.
+var Error = errors.New("protobuf error")
+
+// New formats a string according to the format specifier and arguments and
+// returns an error that has a "proto" prefix.
+func New(f string, x ...interface{}) error {
+ return &prefixError{s: format(f, x...)}
+}
+
+type prefixError struct{ s string }
+
+var prefix = func() string {
+ // Deliberately introduce instability into the error message string to
+ // discourage users from performing error string comparisons.
+ if detrand.Bool() {
+ return "proto: " // use non-breaking spaces (U+00a0)
+ } else {
+ return "proto: " // use regular spaces (U+0020)
+ }
+}()
+
+func (e *prefixError) Error() string {
+ return prefix + e.s
+}
+
+func (e *prefixError) Unwrap() error {
+ return Error
+}
+
+// Wrap returns an error that has a "proto" prefix, the formatted string described
+// by the format specifier and arguments, and a suffix of err. The error wraps err.
+func Wrap(err error, f string, x ...interface{}) error {
+ return &wrapError{
+ s: format(f, x...),
+ err: err,
+ }
+}
+
+type wrapError struct {
+ s string
+ err error
+}
+
+func (e *wrapError) Error() string {
+ return format("%v%v: %v", prefix, e.s, e.err)
+}
+
+func (e *wrapError) Unwrap() error {
+ return e.err
+}
+
+func (e *wrapError) Is(target error) bool {
+ return target == Error
+}
+
+func format(f string, x ...interface{}) string {
+ // avoid "proto: " prefix when chaining
+ for i := 0; i < len(x); i++ {
+ switch e := x[i].(type) {
+ case *prefixError:
+ x[i] = e.s
+ case *wrapError:
+ x[i] = format("%v: %v", e.s, e.err)
+ }
+ }
+ return fmt.Sprintf(f, x...)
+}
+
+func InvalidUTF8(name string) error {
+ return New("field %v contains invalid UTF-8", name)
+}
+
+func RequiredNotSet(name string) error {
+ return New("required field %v not set", name)
+}
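
A minimal sketch (not part of the diff) of the wrap-and-match pattern the errors package above implements, shown with the standard library: a package sentinel is exposed via Unwrap/Is so callers can test errors.Is(err, Error). The wrapError type and message text here are simplified stand-ins, not the package's actual definitions.

package main

import (
	"errors"
	"fmt"
)

var Error = errors.New("protobuf error") // package-level sentinel

type wrapError struct {
	msg string
	err error
}

func (e *wrapError) Error() string        { return "proto: " + e.msg + ": " + e.err.Error() }
func (e *wrapError) Unwrap() error        { return e.err }
func (e *wrapError) Is(target error) bool { return target == Error }

func main() {
	cause := errors.New("bad wire data")
	err := &wrapError{msg: "parsing field 3", err: cause}
	fmt.Println(errors.Is(err, Error)) // true, via the Is method
	fmt.Println(errors.Is(err, cause)) // true, via Unwrap
}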
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
new file mode 100644
index 000000000..f90e909b3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.13
+
+package errors
+
+import "reflect"
+
+// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
+func Is(err, target error) bool {
+ if target == nil {
+ return err == target
+ }
+
+ isComparable := reflect.TypeOf(target).Comparable()
+ for {
+ if isComparable && err == target {
+ return true
+ }
+ if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
+ return true
+ }
+ if err = unwrap(err); err == nil {
+ return false
+ }
+ }
+}
+
+func unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
new file mode 100644
index 000000000..dc05f4191
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+
+package errors
+
+import "errors"
+
+// Is is errors.Is.
+func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
new file mode 100644
index 000000000..74c5fef24
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Any.
+const (
+ Any_TypeUrl = 1 // optional string
+ Any_Value = 2 // optional bytes
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
new file mode 100644
index 000000000..9a6b5f29b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Api.
+const (
+ Api_Name = 1 // optional string
+ Api_Methods = 2 // repeated google.protobuf.Method
+ Api_Options = 3 // repeated google.protobuf.Option
+ Api_Version = 4 // optional string
+ Api_SourceContext = 5 // optional google.protobuf.SourceContext
+ Api_Mixins = 6 // repeated google.protobuf.Mixin
+ Api_Syntax = 7 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Method.
+const (
+ Method_Name = 1 // optional string
+ Method_RequestTypeUrl = 2 // optional string
+ Method_RequestStreaming = 3 // optional bool
+ Method_ResponseTypeUrl = 4 // optional string
+ Method_ResponseStreaming = 5 // optional bool
+ Method_Options = 6 // repeated google.protobuf.Option
+ Method_Syntax = 7 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Mixin.
+const (
+ Mixin_Name = 1 // optional string
+ Mixin_Root = 2 // optional string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
new file mode 100644
index 000000000..6e37b59e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
@@ -0,0 +1,240 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto
+)
+
+// Field numbers for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name = 1 // optional string
+ FileDescriptorProto_Package = 2 // optional string
+ FileDescriptorProto_Dependency = 3 // repeated string
+ FileDescriptorProto_PublicDependency = 10 // repeated int32
+ FileDescriptorProto_WeakDependency = 11 // repeated int32
+ FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto
+ FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto
+ FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto
+ FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto
+ FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions
+ FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo
+ FileDescriptorProto_Syntax = 12 // optional string
+)
+
+// Field numbers for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name = 1 // optional string
+ DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto
+ DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto
+ DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto
+ DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto
+ DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange
+ DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto
+ DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions
+ DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange
+ DescriptorProto_ReservedName = 10 // repeated string
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start = 1 // optional int32
+ DescriptorProto_ExtensionRange_End = 2 // optional int32
+ DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start = 1 // optional int32
+ DescriptorProto_ReservedRange_End = 2 // optional int32
+)
+
+// Field numbers for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name = 1 // optional string
+ FieldDescriptorProto_Number = 3 // optional int32
+ FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label
+ FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type
+ FieldDescriptorProto_TypeName = 6 // optional string
+ FieldDescriptorProto_Extendee = 2 // optional string
+ FieldDescriptorProto_DefaultValue = 7 // optional string
+ FieldDescriptorProto_OneofIndex = 9 // optional int32
+ FieldDescriptorProto_JsonName = 10 // optional string
+ FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions
+ FieldDescriptorProto_Proto3Optional = 17 // optional bool
+)
+
+// Field numbers for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name = 1 // optional string
+ OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name = 1 // optional string
+ EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto
+ EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions
+ EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange
+ EnumDescriptorProto_ReservedName = 5 // repeated string
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32
+ EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32
+)
+
+// Field numbers for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name = 1 // optional string
+ EnumValueDescriptorProto_Number = 2 // optional int32
+ EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions
+)
+
+// Field numbers for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name = 1 // optional string
+ ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto
+ ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions
+)
+
+// Field numbers for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name = 1 // optional string
+ MethodDescriptorProto_InputType = 2 // optional string
+ MethodDescriptorProto_OutputType = 3 // optional string
+ MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions
+ MethodDescriptorProto_ClientStreaming = 5 // optional bool
+ MethodDescriptorProto_ServerStreaming = 6 // optional bool
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage = 1 // optional string
+ FileOptions_JavaOuterClassname = 8 // optional string
+ FileOptions_JavaMultipleFiles = 10 // optional bool
+ FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool
+ FileOptions_JavaStringCheckUtf8 = 27 // optional bool
+ FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode
+ FileOptions_GoPackage = 11 // optional string
+ FileOptions_CcGenericServices = 16 // optional bool
+ FileOptions_JavaGenericServices = 17 // optional bool
+ FileOptions_PyGenericServices = 18 // optional bool
+ FileOptions_PhpGenericServices = 42 // optional bool
+ FileOptions_Deprecated = 23 // optional bool
+ FileOptions_CcEnableArenas = 31 // optional bool
+ FileOptions_ObjcClassPrefix = 36 // optional string
+ FileOptions_CsharpNamespace = 37 // optional string
+ FileOptions_SwiftPrefix = 39 // optional string
+ FileOptions_PhpClassPrefix = 40 // optional string
+ FileOptions_PhpNamespace = 41 // optional string
+ FileOptions_PhpMetadataNamespace = 44 // optional string
+ FileOptions_RubyPackage = 45 // optional string
+ FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat = 1 // optional bool
+ MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool
+ MessageOptions_Deprecated = 3 // optional bool
+ MessageOptions_MapEntry = 7 // optional bool
+ MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType
+ FieldOptions_Packed = 2 // optional bool
+ FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType
+ FieldOptions_Lazy = 5 // optional bool
+ FieldOptions_Deprecated = 3 // optional bool
+ FieldOptions_Weak = 10 // optional bool
+ FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias = 2 // optional bool
+ EnumOptions_Deprecated = 3 // optional bool
+ EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated = 1 // optional bool
+ EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated = 33 // optional bool
+ ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated = 33 // optional bool
+ MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel
+ MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart
+ UninterpretedOption_IdentifierValue = 3 // optional string
+ UninterpretedOption_PositiveIntValue = 4 // optional uint64
+ UninterpretedOption_NegativeIntValue = 5 // optional int64
+ UninterpretedOption_DoubleValue = 6 // optional double
+ UninterpretedOption_StringValue = 7 // optional bytes
+ UninterpretedOption_AggregateValue = 8 // optional string
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart = 1 // required string
+ UninterpretedOption_NamePart_IsExtension = 2 // required bool
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path = 1 // repeated int32
+ SourceCodeInfo_Location_Span = 2 // repeated int32
+ SourceCodeInfo_Location_LeadingComments = 3 // optional string
+ SourceCodeInfo_Location_TrailingComments = 4 // optional string
+ SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path = 1 // repeated int32
+ GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string
+ GeneratedCodeInfo_Annotation_Begin = 3 // optional int32
+ GeneratedCodeInfo_Annotation_End = 4 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
new file mode 100644
index 000000000..e59788599
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fieldnum contains constants for field numbers of fields in messages
+// declared in descriptor.proto and any of the well-known types.
+package fieldnum
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
new file mode 100644
index 000000000..8816c7358
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Duration.
+const (
+ Duration_Seconds = 1 // optional int64
+ Duration_Nanos = 2 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
new file mode 100644
index 000000000..b5130a6dd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
@@ -0,0 +1,10 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Empty.
+const ()
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
new file mode 100644
index 000000000..7e3bfa27b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths = 1 // repeated string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
new file mode 100644
index 000000000..241972b1f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName = 1 // optional string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
new file mode 100644
index 000000000..c460aab44
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Struct.
+const (
+ Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry
+)
+
+// Field numbers for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key = 1 // optional string
+ Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value
+)
+
+// Field numbers for google.protobuf.Value.
+const (
+ Value_NullValue = 1 // optional google.protobuf.NullValue
+ Value_NumberValue = 2 // optional double
+ Value_StringValue = 3 // optional string
+ Value_BoolValue = 4 // optional bool
+ Value_StructValue = 5 // optional google.protobuf.Struct
+ Value_ListValue = 6 // optional google.protobuf.ListValue
+)
+
+// Field numbers for google.protobuf.ListValue.
+const (
+ ListValue_Values = 1 // repeated google.protobuf.Value
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
new file mode 100644
index 000000000..b4346fba5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds = 1 // optional int64
+ Timestamp_Nanos = 2 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
new file mode 100644
index 000000000..b392e9598
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
@@ -0,0 +1,53 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Type.
+const (
+ Type_Name = 1 // optional string
+ Type_Fields = 2 // repeated google.protobuf.Field
+ Type_Oneofs = 3 // repeated string
+ Type_Options = 4 // repeated google.protobuf.Option
+ Type_SourceContext = 5 // optional google.protobuf.SourceContext
+ Type_Syntax = 6 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Field.
+const (
+ Field_Kind = 1 // optional google.protobuf.Field.Kind
+ Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality
+ Field_Number = 3 // optional int32
+ Field_Name = 4 // optional string
+ Field_TypeUrl = 6 // optional string
+ Field_OneofIndex = 7 // optional int32
+ Field_Packed = 8 // optional bool
+ Field_Options = 9 // repeated google.protobuf.Option
+ Field_JsonName = 10 // optional string
+ Field_DefaultValue = 11 // optional string
+)
+
+// Field numbers for google.protobuf.Enum.
+const (
+ Enum_Name = 1 // optional string
+ Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue
+ Enum_Options = 3 // repeated google.protobuf.Option
+ Enum_SourceContext = 4 // optional google.protobuf.SourceContext
+ Enum_Syntax = 5 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.EnumValue.
+const (
+ EnumValue_Name = 1 // optional string
+ EnumValue_Number = 2 // optional int32
+ EnumValue_Options = 3 // repeated google.protobuf.Option
+)
+
+// Field numbers for google.protobuf.Option.
+const (
+ Option_Name = 1 // optional string
+ Option_Value = 2 // optional google.protobuf.Any
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
new file mode 100644
index 000000000..42f846a9f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value = 1 // optional double
+)
+
+// Field numbers for google.protobuf.FloatValue.
+const (
+ FloatValue_Value = 1 // optional float
+)
+
+// Field numbers for google.protobuf.Int64Value.
+const (
+ Int64Value_Value = 1 // optional int64
+)
+
+// Field numbers for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value = 1 // optional uint64
+)
+
+// Field numbers for google.protobuf.Int32Value.
+const (
+ Int32Value_Value = 1 // optional int32
+)
+
+// Field numbers for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value = 1 // optional uint32
+)
+
+// Field numbers for google.protobuf.BoolValue.
+const (
+ BoolValue_Value = 1 // optional bool
+)
+
+// Field numbers for google.protobuf.StringValue.
+const (
+ StringValue_Value = 1 // optional string
+)
+
+// Field numbers for google.protobuf.BytesValue.
+const (
+ BytesValue_Value = 1 // optional bytes
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
new file mode 100644
index 000000000..517c4e2a0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fieldsort defines an ordering of fields.
+//
+// The ordering defined by this package matches the historic behavior of the proto
+// package, placing extensions first and oneofs last.
+//
+// There is no guarantee about stability of the wire encoding, and users should not
+// depend on the order defined in this package as it is subject to change without
+// notice.
+package fieldsort
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Less returns true if field a comes before field b in ordered wire marshal output.
+func Less(a, b protoreflect.FieldDescriptor) bool {
+ ea := a.IsExtension()
+ eb := b.IsExtension()
+ oa := a.ContainingOneof()
+ ob := b.ContainingOneof()
+ switch {
+ case ea != eb:
+ return ea
+ case oa != nil && ob != nil:
+ if oa == ob {
+ return a.Number() < b.Number()
+ }
+ return oa.Index() < ob.Index()
+ case oa != nil && !oa.IsSynthetic():
+ return false
+ case ob != nil && !ob.IsSynthetic():
+ return true
+ default:
+ return a.Number() < b.Number()
+ }
+}
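
A minimal sketch (not part of the diff) of the "extensions first, then by field number" part of the ordering above, applied with sort.Slice to simplified stand-in values instead of protoreflect.FieldDescriptor. The field struct and less helper are invented for illustration; the oneof handling is omitted.

package main

import (
	"fmt"
	"sort"
)

type field struct {
	num         int
	isExtension bool
}

func less(a, b field) bool {
	if a.isExtension != b.isExtension {
		return a.isExtension // extensions sort first
	}
	return a.num < b.num
}

func main() {
	fields := []field{{num: 2}, {num: 5, isExtension: true}, {num: 1}}
	sort.Slice(fields, func(i, j int) bool { return less(fields[i], fields[j]) })
	fmt.Println(fields) // [{5 true} {1 false} {2 false}]
}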
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
new file mode 100644
index 000000000..462d384e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filedesc provides functionality for constructing descriptors.
+package filedesc
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder constructs a protoreflect.FileDescriptor from the raw descriptor.
+type Builder struct {
+ // GoPackagePath is the Go package path that is invoking this builder.
+ GoPackagePath string
+
+ // RawDescriptor is the wire-encoded bytes of FileDescriptorProto
+ // and must be populated.
+ RawDescriptor []byte
+
+ // NumEnums is the total number of enums declared in the file.
+ NumEnums int32
+ // NumMessages is the total number of messages declared in the file.
+ // It includes the implicit message declarations for map entries.
+ NumMessages int32
+ // NumExtensions is the total number of extensions declared in the file.
+ NumExtensions int32
+ // NumServices is the total number of services declared in the file.
+ NumServices int32
+
+ // TypeResolver resolves extension field types for descriptor options.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeResolver interface {
+ preg.ExtensionTypeResolver
+ }
+
+ // FileRegistry is used to look up file, enum, and message dependencies.
+ // Once constructed, the file descriptor is registered here.
+ // If nil, it uses protoregistry.GlobalFiles.
+ FileRegistry interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+}
+
+// resolverByIndex is an interface Builder.FileRegistry may implement.
+// If so, it permits looking up an enum or message dependency based on the
+// sub-list and element index into filetype.Builder.DependencyIndexes.
+type resolverByIndex interface {
+ FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor
+ FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor
+}
+
+// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
+const (
+ listFieldDeps int32 = iota
+ listExtTargets
+ listExtDeps
+ listMethInDeps
+ listMethOutDeps
+)
+
+// Out is the output of the Builder.
+type Out struct {
+ File pref.FileDescriptor
+
+ // Enums is all enum descriptors in "flattened ordering".
+ Enums []Enum
+ // Messages is all message descriptors in "flattened ordering".
+ // It includes the implicit message declarations for map entries.
+ Messages []Message
+ // Extensions is all extension descriptors in "flattened ordering".
+ Extensions []Extension
+ // Services is all service descriptors in "flattened ordering".
+ Services []Service
+}
+
+// Build constructs a FileDescriptor given the parameters set in Builder.
+// It assumes that the inputs are well-formed and panics if any inconsistencies
+// are encountered.
+//
+// If NumEnums+NumMessages+NumExtensions+NumServices is zero,
+// then Build automatically derives them from the raw descriptor.
+func (db Builder) Build() (out Out) {
+ // Populate the counts if uninitialized.
+ if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 {
+ db.unmarshalCounts(db.RawDescriptor, true)
+ }
+
+ // Initialize resolvers and registries if unpopulated.
+ if db.TypeResolver == nil {
+ db.TypeResolver = preg.GlobalTypes
+ }
+ if db.FileRegistry == nil {
+ db.FileRegistry = preg.GlobalFiles
+ }
+
+ fd := newRawFile(db)
+ out.File = fd
+ out.Enums = fd.allEnums
+ out.Messages = fd.allMessages
+ out.Extensions = fd.allExtensions
+ out.Services = fd.allServices
+
+ if err := db.FileRegistry.RegisterFile(fd); err != nil {
+ panic(err)
+ }
+ return out
+}
+
+// unmarshalCounts counts the number of enum, message, extension, and service
+// declarations in the raw message, which is either a FileDescriptorProto
+// or a MessageDescriptorProto depending on whether isFile is set.
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ if isFile {
+ switch num {
+ case fieldnum.FileDescriptorProto_EnumType:
+ db.NumEnums++
+ case fieldnum.FileDescriptorProto_MessageType:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case fieldnum.FileDescriptorProto_Extension:
+ db.NumExtensions++
+ case fieldnum.FileDescriptorProto_Service:
+ db.NumServices++
+ }
+ } else {
+ switch num {
+ case fieldnum.DescriptorProto_EnumType:
+ db.NumEnums++
+ case fieldnum.DescriptorProto_NestedType:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case fieldnum.DescriptorProto_Extension:
+ db.NumExtensions++
+ }
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
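
A minimal sketch (not part of the diff) of the protowire walk that unmarshalCounts above performs: consume a tag, then either consume the length-delimited payload or skip the field value. It uses the public google.golang.org/protobuf/encoding/protowire package; the helper name countBytesFields and the field numbers are arbitrary, and well-formed input is assumed (no negative-length error handling).

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func countBytesFields(b []byte) int {
	count := 0
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if typ == protowire.BytesType {
			_, m := protowire.ConsumeBytes(b)
			b = b[m:]
			count++
			continue
		}
		// Skip any non-length-delimited field value.
		m := protowire.ConsumeFieldValue(num, typ, b)
		b = b[m:]
	}
	return count
}

func main() {
	var raw []byte
	raw = protowire.AppendTag(raw, 1, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 42)
	raw = protowire.AppendTag(raw, 4, protowire.BytesType)
	raw = protowire.AppendBytes(raw, []byte("nested"))
	fmt.Println(countBytesFields(raw)) // 1
}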
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
new file mode 100644
index 000000000..2540befd6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -0,0 +1,613 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// The types in this file may have a suffix:
+// • L0: Contains fields common to all descriptors (except File) and
+// must be initialized up front.
+// • L1: Contains fields specific to a descriptor and
+// must be initialized up front.
+// • L2: Contains fields that are lazily initialized when constructing
+// from the raw file descriptor. When constructing as a literal, the L2
+// fields must be initialized up front.
+//
+// The types are exported so that packages like reflect/protodesc can
+// directly construct descriptors.
+
+type (
+ File struct {
+ fileRaw
+ L1 FileL1
+
+ once uint32 // atomically set if L2 is valid
+ mu sync.Mutex // protects L2
+ L2 *FileL2
+ }
+ FileL1 struct {
+ Syntax pref.Syntax
+ Path string
+ Package pref.FullName
+
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ Services Services
+ }
+ FileL2 struct {
+ Options func() pref.ProtoMessage
+ Imports FileImports
+ Locations SourceLocations
+ }
+)
+
+func (fd *File) ParentFile() pref.FileDescriptor { return fd }
+func (fd *File) Parent() pref.Descriptor { return nil }
+func (fd *File) Index() int { return 0 }
+func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax }
+func (fd *File) Name() pref.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() pref.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Options() pref.ProtoMessage {
+ if f := fd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.File
+}
+func (fd *File) Path() string { return fd.L1.Path }
+func (fd *File) Package() pref.FullName { return fd.L1.Package }
+func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports }
+func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums }
+func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
+func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
+func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
+func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations }
+func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *File) ProtoType(pref.FileDescriptor) {}
+func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+
+func (fd *File) lazyInit() *FileL2 {
+ if atomic.LoadUint32(&fd.once) == 0 {
+ fd.lazyInitOnce()
+ }
+ return fd.L2
+}
+
+func (fd *File) lazyInitOnce() {
+ fd.mu.Lock()
+ if fd.L2 == nil {
+ fd.lazyRawInit() // recursively initializes all L2 structures
+ }
+ atomic.StoreUint32(&fd.once, 1)
+ fd.mu.Unlock()
+}
+
+// ProtoLegacyRawDesc is a pseudo-internal API that allows the v1 code
+// to retrieve the raw descriptor.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *File) ProtoLegacyRawDesc() []byte {
+ return fd.builder.RawDescriptor
+}
+
+// GoPackagePath is a pseudo-internal API for determining the Go package path
+// that this file descriptor is declared in.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *File) GoPackagePath() string {
+ return fd.builder.GoPackagePath
+}
+
+type (
+ Enum struct {
+ Base
+ L1 EnumL1
+ L2 *EnumL2 // protected by fileDesc.once
+ }
+ EnumL1 struct {
+ eagerValues bool // controls whether EnumL2.Values is already populated
+ }
+ EnumL2 struct {
+ Options func() pref.ProtoMessage
+ Values EnumValues
+ ReservedNames Names
+ ReservedRanges EnumRanges
+ }
+
+ EnumValue struct {
+ Base
+ L1 EnumValueL1
+ }
+ EnumValueL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.EnumNumber
+ }
+)
+
+func (ed *Enum) Options() pref.ProtoMessage {
+ if f := ed.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Enum
+}
+func (ed *Enum) Values() pref.EnumValueDescriptors {
+ if ed.L1.eagerValues {
+ return &ed.L2.Values
+ }
+ return &ed.lazyInit().Values
+}
+func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames }
+func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges }
+func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *Enum) ProtoType(pref.EnumDescriptor) {}
+func (ed *Enum) lazyInit() *EnumL2 {
+ ed.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return ed.L2
+}
+
+func (ed *EnumValue) Options() pref.ProtoMessage {
+ if f := ed.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.EnumValue
+}
+func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number }
+func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {}
+
+type (
+ Message struct {
+ Base
+ L1 MessageL1
+ L2 *MessageL2 // protected by fileDesc.once
+ }
+ MessageL1 struct {
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
+ }
+ MessageL2 struct {
+ Options func() pref.ProtoMessage
+ Fields Fields
+ Oneofs Oneofs
+ ReservedNames Names
+ ReservedRanges FieldRanges
+ RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
+ ExtensionRanges FieldRanges
+ ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges
+ }
+
+ Field struct {
+ Base
+ L1 FieldL1
+ }
+ FieldL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.FieldNumber
+ Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
+ Kind pref.Kind
+ JSONName jsonName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsWeak bool // promoted from google.protobuf.FieldOptions
+ HasPacked bool // promoted from google.protobuf.FieldOptions
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+
+ Oneof struct {
+ Base
+ L1 OneofL1
+ }
+ OneofL1 struct {
+ Options func() pref.ProtoMessage
+ Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
+ }
+)
+
+func (md *Message) Options() pref.ProtoMessage {
+ if f := md.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Message
+}
+func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
+func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields }
+func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs }
+func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames }
+func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges }
+func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers }
+func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges }
+func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage {
+ if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
+ return f()
+ }
+ return descopts.ExtensionRange
+}
+func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums }
+func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages }
+func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions }
+func (md *Message) ProtoType(pref.MessageDescriptor) {}
+func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Message) lazyInit() *MessageL2 {
+ md.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return md.L2
+}
+
+// IsMessageSet is a pseudo-internal API for checking whether a message
+// should serialize in the proto1 message format.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (md *Message) IsMessageSet() bool {
+ return md.L1.IsMessageSet
+}
+
+func (fd *Field) Options() pref.ProtoMessage {
+ if f := fd.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
+func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
+func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
+func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has }
+func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) }
+func (fd *Field) HasPresence() bool {
+ return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
+}
+func (fd *Field) HasOptionalKeyword() bool {
+ return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
+}
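+// IsPacked reports whether a repeated field uses packed encoding. If the packed
+// option was not set explicitly, proto3 defaults repeated scalar numeric fields
+// (everything except string, bytes, message, and group kinds) to packed.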
+func (fd *Field) IsPacked() bool {
+ if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
+ switch fd.L1.Kind {
+ case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind:
+ default:
+ return true
+ }
+ }
+ return fd.L1.IsPacked
+}
+func (fd *Field) IsExtension() bool { return false }
+func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() }
+func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
+func (fd *Field) MapKey() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(1)
+}
+func (fd *Field) MapValue() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(2)
+}
+func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
+func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
+func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum }
+func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof }
+func (fd *Field) ContainingMessage() pref.MessageDescriptor {
+ return fd.L0.Parent.(pref.MessageDescriptor)
+}
+func (fd *Field) Enum() pref.EnumDescriptor {
+ return fd.L1.Enum
+}
+func (fd *Field) Message() pref.MessageDescriptor {
+ if fd.L1.IsWeak {
+ if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ }
+ return fd.L1.Message
+}
+func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *Field) ProtoType(pref.FieldDescriptor) {}
+
+// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
+// validation for the string field. This exists for Google-internal use only
+// since proto3 did not enforce UTF-8 validity prior to the open-source release.
+// If this method does not exist, the default is to enforce valid UTF-8.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *Field) EnforceUTF8() bool {
+ if fd.L1.HasEnforceUTF8 {
+ return fd.L1.EnforceUTF8
+ }
+ return fd.L0.ParentFile.L1.Syntax == pref.Proto3
+}
+
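+// IsSynthetic reports whether this oneof was synthesized to represent a proto3
+// optional field; such a oneof wraps exactly one field declared with the
+// optional keyword.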
+func (od *Oneof) IsSynthetic() bool {
+ return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
+}
+func (od *Oneof) Options() pref.ProtoMessage {
+ if f := od.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Oneof
+}
+func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields }
+func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
+func (od *Oneof) ProtoType(pref.OneofDescriptor) {}
+
+type (
+ Extension struct {
+ Base
+ L1 ExtensionL1
+ L2 *ExtensionL2 // protected by fileDesc.once
+ }
+ ExtensionL1 struct {
+ Number pref.FieldNumber
+ Extendee pref.MessageDescriptor
+ Cardinality pref.Cardinality
+ Kind pref.Kind
+ }
+ ExtensionL2 struct {
+ Options func() pref.ProtoMessage
+ JSONName jsonName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+)
+
+func (xd *Extension) Options() pref.ProtoMessage {
+ if f := xd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
+func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
+func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
+func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has }
+func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) }
+func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated }
+func (xd *Extension) HasOptionalKeyword() bool {
+ return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
+}
+func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsExtension() bool { return true }
+func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated }
+func (xd *Extension) IsMap() bool { return false }
+func (xd *Extension) MapKey() pref.FieldDescriptor { return nil }
+func (xd *Extension) MapValue() pref.FieldDescriptor { return nil }
+func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
+func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) }
+func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum }
+func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee }
+func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum }
+func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message }
+func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
+func (xd *Extension) ProtoType(pref.FieldDescriptor) {}
+func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
+func (xd *Extension) lazyInit() *ExtensionL2 {
+ xd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return xd.L2
+}
+
+type (
+ Service struct {
+ Base
+ L1 ServiceL1
+ L2 *ServiceL2 // protected by fileDesc.once
+ }
+ ServiceL1 struct{}
+ ServiceL2 struct {
+ Options func() pref.ProtoMessage
+ Methods Methods
+ }
+
+ Method struct {
+ Base
+ L1 MethodL1
+ }
+ MethodL1 struct {
+ Options func() pref.ProtoMessage
+ Input pref.MessageDescriptor
+ Output pref.MessageDescriptor
+ IsStreamingClient bool
+ IsStreamingServer bool
+ }
+)
+
+func (sd *Service) Options() pref.ProtoMessage {
+ if f := sd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Service
+}
+func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods }
+func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
+func (sd *Service) ProtoType(pref.ServiceDescriptor) {}
+func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
+func (sd *Service) lazyInit() *ServiceL2 {
+ sd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return sd.L2
+}
+
+func (md *Method) Options() pref.ProtoMessage {
+ if f := md.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Method
+}
+func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input }
+func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output }
+func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
+func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
+func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Method) ProtoType(pref.MethodDescriptor) {}
+func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
+
+// Surrogate files can be used to create standalone descriptors
+// where the only information derived from the parent file is the syntax.
+var (
+ SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}}
+)
+
+type (
+ Base struct {
+ L0 BaseL0
+ }
+ BaseL0 struct {
+ FullName pref.FullName // must be populated
+ ParentFile *File // must be populated
+ Parent pref.Descriptor
+ Index int
+ }
+)
+
+func (d *Base) Name() pref.Name { return d.L0.FullName.Name() }
+func (d *Base) FullName() pref.FullName { return d.L0.FullName }
+func (d *Base) ParentFile() pref.FileDescriptor {
+ if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
+ return nil // surrogate files are not real parents
+ }
+ return d.L0.ParentFile
+}
+func (d *Base) Parent() pref.Descriptor { return d.L0.Parent }
+func (d *Base) Index() int { return d.L0.Index }
+func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() }
+func (d *Base) IsPlaceholder() bool { return false }
+func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
+
+type jsonName struct {
+ has bool
+ once sync.Once
+ name string
+}
+
+// Init initializes the name. It is exported for use by other internal packages.
+func (js *jsonName) Init(s string) {
+ js.has = true
+ js.name = s
+}
+
+func (js *jsonName) get(fd pref.FieldDescriptor) string {
+ if !js.has {
+ js.once.Do(func() {
+ js.name = strs.JSONCamelCase(string(fd.Name()))
+ })
+ }
+ return js.name
+}
+
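+// DefaultValue constructs a defaultValue from v and an optional enum value
+// descriptor, keeping a copy of any bytes default so that later mutation of
+// the original value can be detected.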
+func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
+ dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
+ if b, ok := v.Interface().([]byte); ok {
+ // Store a copy of the default bytes, so that we can detect
+ // accidental mutations of the original value.
+ dv.bytes = append([]byte(nil), b...)
+ }
+ return dv
+}
+
+func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue {
+ var evs pref.EnumValueDescriptors
+ if k == pref.EnumKind {
+ // If the enum is declared within the same file, be careful not to
+ // blindly call the Values method, lest we bind ourselves in a deadlock.
+ if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
+ evs = &e.L2.Values
+ } else {
+ evs = ed.Values()
+ }
+
+ // If we are unable to resolve the enum dependency, use a placeholder
+ // enum value since we will not be able to parse the default value.
+ if ed.IsPlaceholder() && pref.Name(b).IsValid() {
+ v := pref.ValueOfEnum(0)
+ ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b)))
+ return DefaultValue(v, ev)
+ }
+ }
+
+ v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor)
+ if err != nil {
+ panic(err)
+ }
+ return DefaultValue(v, ev)
+}
+
+type defaultValue struct {
+ has bool
+ val pref.Value
+ enum pref.EnumValueDescriptor
+ bytes []byte
+}
+
+func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value {
+ // Return the zero value as the default if unpopulated.
+ if !dv.has {
+ if fd.Cardinality() == pref.Repeated {
+ return pref.Value{}
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return pref.ValueOfBool(false)
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ return pref.ValueOfInt32(0)
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return pref.ValueOfInt64(0)
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ return pref.ValueOfUint32(0)
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ return pref.ValueOfUint64(0)
+ case pref.FloatKind:
+ return pref.ValueOfFloat32(0)
+ case pref.DoubleKind:
+ return pref.ValueOfFloat64(0)
+ case pref.StringKind:
+ return pref.ValueOfString("")
+ case pref.BytesKind:
+ return pref.ValueOfBytes(nil)
+ case pref.EnumKind:
+ if evs := fd.Enum().Values(); evs.Len() > 0 {
+ return pref.ValueOfEnum(evs.Get(0).Number())
+ }
+ return pref.ValueOfEnum(0)
+ }
+ }
+
+ if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) {
+ // TODO: Avoid panic if we're running with the race detector
+ // and instead spawn a goroutine that periodically resets
+ // this value back to the original to induce a race.
+ panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName()))
+ }
+ return dv.val
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
new file mode 100644
index 000000000..c0cddf86a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -0,0 +1,471 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// fileRaw is a data struct used when initializing a file descriptor from
+// a raw FileDescriptorProto.
+type fileRaw struct {
+ builder Builder
+ allEnums []Enum
+ allMessages []Message
+ allExtensions []Extension
+ allServices []Service
+}
+
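+// newRawFile constructs a File from the raw descriptor in db. Only the seed
+// information needed for registration is unmarshaled eagerly; the remaining
+// data is deferred until lazyInit.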
+func newRawFile(db Builder) *File {
+ fd := &File{fileRaw: fileRaw{builder: db}}
+ fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices)
+ fd.unmarshalSeed(db.RawDescriptor)
+
+ // Extended message targets are eagerly resolved since registration
+ // needs this information at program init time.
+ for i := range fd.allExtensions {
+ xd := &fd.allExtensions[i]
+ xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i))
+ }
+
+ fd.checkDecls()
+ return fd
+}
+
+// initDecls pre-allocates slices for the exact number of enums, messages
+// (including map entries), extensions, and services declared in the proto file.
+// This is done to avoid regrowing the slice, which would change the address
+// for any previously seen declaration.
+//
+// The alloc methods "allocate" slices by pulling from the reserved capacity.
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) {
+ fd.allEnums = make([]Enum, 0, numEnums)
+ fd.allMessages = make([]Message, 0, numMessages)
+ fd.allExtensions = make([]Extension, 0, numExtensions)
+ fd.allServices = make([]Service, 0, numServices)
+}
+
+func (fd *File) allocEnums(n int) []Enum {
+ total := len(fd.allEnums)
+ es := fd.allEnums[total : total+n]
+ fd.allEnums = fd.allEnums[:total+n]
+ return es
+}
+func (fd *File) allocMessages(n int) []Message {
+ total := len(fd.allMessages)
+ ms := fd.allMessages[total : total+n]
+ fd.allMessages = fd.allMessages[:total+n]
+ return ms
+}
+func (fd *File) allocExtensions(n int) []Extension {
+ total := len(fd.allExtensions)
+ xs := fd.allExtensions[total : total+n]
+ fd.allExtensions = fd.allExtensions[:total+n]
+ return xs
+}
+func (fd *File) allocServices(n int) []Service {
+ total := len(fd.allServices)
+ xs := fd.allServices[total : total+n]
+ fd.allServices = fd.allServices[:total+n]
+ return xs
+}
+
+// checkDecls performs a sanity check that the expected number of declarations
+// matches the number actually found in the descriptor proto.
+func (fd *File) checkDecls() {
+ switch {
+ case len(fd.allEnums) != cap(fd.allEnums):
+ case len(fd.allMessages) != cap(fd.allMessages):
+ case len(fd.allExtensions) != cap(fd.allExtensions):
+ case len(fd.allServices) != cap(fd.allServices):
+ default:
+ return
+ }
+ panic("mismatching cardinality")
+}
+
+func (fd *File) unmarshalSeed(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions, numServices int
+ var posEnums, posMessages, posExtensions, posServices int
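+	// b0 retains the original buffer so that the pos* offsets recorded below can
+	// be used to re-scan each run of repeated declaration fields a second time.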
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_Syntax:
+ switch string(v) {
+ case "proto2":
+ fd.L1.Syntax = pref.Proto2
+ case "proto3":
+ fd.L1.Syntax = pref.Proto3
+ default:
+ panic("invalid syntax")
+ }
+ case fieldnum.FileDescriptorProto_Name:
+ fd.L1.Path = sb.MakeString(v)
+ case fieldnum.FileDescriptorProto_Package:
+ fd.L1.Package = pref.FullName(sb.MakeString(v))
+ case fieldnum.FileDescriptorProto_EnumType:
+ if prevField != fieldnum.FileDescriptorProto_EnumType {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case fieldnum.FileDescriptorProto_MessageType:
+ if prevField != fieldnum.FileDescriptorProto_MessageType {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case fieldnum.FileDescriptorProto_Extension:
+ if prevField != fieldnum.FileDescriptorProto_Extension {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case fieldnum.FileDescriptorProto_Service:
+ if prevField != fieldnum.FileDescriptorProto_Service {
+ if numServices > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posServices = len(b0) - len(b) - n - m
+ }
+ numServices++
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+ // If syntax is missing, it is assumed to be proto2.
+ if fd.L1.Syntax == 0 {
+ fd.L1.Syntax = pref.Proto2
+ }
+
+	// Must allocate all declarations before parsing each descriptor type
+	// to ensure that all descriptors are handled in "flattened ordering".
+ if numEnums > 0 {
+ fd.L1.Enums.List = fd.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ fd.L1.Messages.List = fd.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ fd.L1.Extensions.List = fd.allocExtensions(numExtensions)
+ }
+ if numServices > 0 {
+ fd.L1.Services.List = fd.allocServices(numServices)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range fd.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range fd.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range fd.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numServices > 0 {
+ b := b0[posServices:]
+ for i := range fd.L1.Services.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ ed.L0.ParentFile = pf
+ ed.L0.Parent = pd
+ ed.L0.Index = i
+
+ var numValues int
+ for b := b; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Name:
+ ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.EnumDescriptorProto_Value:
+ numValues++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+
+ // Only construct enum value descriptors for top-level enums since
+ // they are needed for registration.
+ if pd != pf {
+ return
+ }
+ ed.L1.eagerValues = true
+ ed.L2 = new(EnumL2)
+ ed.L2.Values.List = make([]EnumValue, numValues)
+ for i := 0; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Value:
+ ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
+ i++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions int
+ var posEnums, posMessages, posExtensions int
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_Name:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.DescriptorProto_EnumType:
+ if prevField != fieldnum.DescriptorProto_EnumType {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case fieldnum.DescriptorProto_NestedType:
+ if prevField != fieldnum.DescriptorProto_NestedType {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case fieldnum.DescriptorProto_Extension:
+ if prevField != fieldnum.DescriptorProto_Extension {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case fieldnum.DescriptorProto_Options:
+ md.unmarshalSeedOptions(v)
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+	// Must allocate all declarations before parsing each descriptor type
+	// to ensure that all descriptors are handled in "flattened ordering".
+ if numEnums > 0 {
+ md.L1.Enums.List = pf.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ md.L1.Messages.List = pf.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ md.L1.Extensions.List = pf.allocExtensions(numExtensions)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range md.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range md.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range md.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeedOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MessageOptions_MapEntry:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case fieldnum.MessageOptions_MessageSetWireFormat:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ xd.L0.ParentFile = pf
+ xd.L0.Parent = pd
+ xd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Number:
+ xd.L1.Number = pref.FieldNumber(v)
+ case fieldnum.FieldDescriptorProto_Label:
+ xd.L1.Cardinality = pref.Cardinality(v)
+ case fieldnum.FieldDescriptorProto_Type:
+ xd.L1.Kind = pref.Kind(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Name:
+ xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.FieldDescriptorProto_Extendee:
+ xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ sd.L0.ParentFile = pf
+ sd.L0.Parent = pd
+ sd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.ServiceDescriptorProto_Name:
+ sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+var nameBuilderPool = sync.Pool{
+ New: func() interface{} { return new(strs.Builder) },
+}
+
+func getBuilder() *strs.Builder {
+ return nameBuilderPool.Get().(*strs.Builder)
+}
+func putBuilder(b *strs.Builder) {
+ nameBuilderPool.Put(b)
+}
+
+// makeFullName converts b to a protoreflect.FullName,
+// where b must start with a leading dot.
+func makeFullName(sb *strs.Builder, b []byte) pref.FullName {
+ if len(b) == 0 || b[0] != '.' {
+ panic("name reference must be fully qualified")
+ }
+ return pref.FullName(sb.MakeString(b[1:]))
+}
+
+func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName {
+ return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix)))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
new file mode 100644
index 000000000..bc215944a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -0,0 +1,704 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
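+// lazyRawInit performs the deferred half of file initialization: it unmarshals
+// the full descriptor and then resolves cross-references for messages,
+// extensions, and services.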
+func (fd *File) lazyRawInit() {
+ fd.unmarshalFull(fd.builder.RawDescriptor)
+ fd.resolveMessages()
+ fd.resolveExtensions()
+ fd.resolveServices()
+}
+
+func (file *File) resolveMessages() {
+ var depIdx int32
+ for i := range file.allMessages {
+ md := &file.allMessages[i]
+
+ // Resolve message field dependencies.
+ for j := range md.L2.Fields.List {
+ fd := &md.L2.Fields.List[j]
+
+ // Weak fields are resolved upon actual use.
+ if fd.L1.IsWeak {
+ continue
+ }
+
+ // Resolve message field dependency.
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := fd.L1.Default.val; v.IsValid() {
+ fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum)
+ }
+ }
+ }
+}
+
+func (file *File) resolveExtensions() {
+ var depIdx int32
+ for i := range file.allExtensions {
+ xd := &file.allExtensions[i]
+
+ // Resolve extension field dependency.
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := xd.L2.Default.val; v.IsValid() {
+ xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum)
+ }
+ }
+}
+
+func (file *File) resolveServices() {
+ var depIdx int32
+ for i := range file.allServices {
+ sd := &file.allServices[i]
+
+ // Resolve method dependencies.
+ for j := range sd.L2.Methods.List {
+ md := &sd.L2.Methods.List[j]
+ md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx)
+ md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx)
+ depIdx++
+ }
+ }
+}
+
+func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
+ return ed2
+ }
+ }
+ for i := range file.allEnums {
+ if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() {
+ return ed2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
+ return d.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
+ return md2
+ }
+ }
+ for i := range file.allMessages {
+ if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() {
+ return md2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ return md
+}
+
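+// unmarshalFull parses the remaining portions of the raw descriptor into L2:
+// imports, options, and the full contents of every declared type and service.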
+func (fd *File) unmarshalFull(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var enumIdx, messageIdx, extensionIdx, serviceIdx int
+ var rawOptions []byte
+ fd.L2 = new(FileL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_PublicDependency:
+ fd.L2.Imports[v].IsPublic = true
+ case fieldnum.FileDescriptorProto_WeakDependency:
+ fd.L2.Imports[v].IsWeak = true
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_Dependency:
+ path := sb.MakeString(v)
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
+ case fieldnum.FileDescriptorProto_EnumType:
+ fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case fieldnum.FileDescriptorProto_MessageType:
+ fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case fieldnum.FileDescriptorProto_Extension:
+ fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case fieldnum.FileDescriptorProto_Service:
+ fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
+ serviceIdx++
+ case fieldnum.FileDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+}
+
+func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawValues [][]byte
+ var rawOptions []byte
+ if !ed.L1.eagerValues {
+ ed.L2 = new(EnumL2)
+ }
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Value:
+ rawValues = append(rawValues, v)
+ case fieldnum.EnumDescriptorProto_ReservedName:
+ ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case fieldnum.EnumDescriptorProto_ReservedRange:
+ ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
+ case fieldnum.EnumDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if !ed.L1.eagerValues && len(rawValues) > 0 {
+ ed.L2.Values.List = make([]EnumValue, len(rawValues))
+ for i, b := range rawValues {
+ ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i)
+ }
+ }
+ ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
+}
+
+func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_EnumReservedRange_Start:
+ r[0] = pref.EnumNumber(v)
+ case fieldnum.EnumDescriptorProto_EnumReservedRange_End:
+ r[1] = pref.EnumNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ vd.L0.ParentFile = pf
+ vd.L0.Parent = pd
+ vd.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumValueDescriptorProto_Number:
+ vd.L1.Number = pref.EnumNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumValueDescriptorProto_Name:
+ // NOTE: Enum values are in the same scope as the enum parent.
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
+ case fieldnum.EnumValueDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions)
+}
+
+func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawFields, rawOneofs [][]byte
+ var enumIdx, messageIdx, extensionIdx int
+ var rawOptions []byte
+ md.L2 = new(MessageL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_Field:
+ rawFields = append(rawFields, v)
+ case fieldnum.DescriptorProto_OneofDecl:
+ rawOneofs = append(rawOneofs, v)
+ case fieldnum.DescriptorProto_ReservedName:
+ md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case fieldnum.DescriptorProto_ReservedRange:
+ md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
+ case fieldnum.DescriptorProto_ExtensionRange:
+ r, rawOptions := unmarshalMessageExtensionRange(v)
+ opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
+ case fieldnum.DescriptorProto_EnumType:
+ md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case fieldnum.DescriptorProto_NestedType:
+ md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case fieldnum.DescriptorProto_Extension:
+ md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case fieldnum.DescriptorProto_Options:
+ md.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawFields) > 0 || len(rawOneofs) > 0 {
+ md.L2.Fields.List = make([]Field, len(rawFields))
+ md.L2.Oneofs.List = make([]Oneof, len(rawOneofs))
+ for i, b := range rawFields {
+ fd := &md.L2.Fields.List[i]
+ fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ if fd.L1.Cardinality == pref.Required {
+ md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
+ }
+ }
+ for i, b := range rawOneofs {
+ od := &md.L2.Oneofs.List[i]
+ od.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ }
+ }
+ md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
+}
+
+func (md *Message) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MessageOptions_MapEntry:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case fieldnum.MessageOptions_MessageSetWireFormat:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ReservedRange_Start:
+ r[0] = pref.FieldNumber(v)
+ case fieldnum.DescriptorProto_ReservedRange_End:
+ r[1] = pref.FieldNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ExtensionRange_Start:
+ r[0] = pref.FieldNumber(v)
+ case fieldnum.DescriptorProto_ExtensionRange_End:
+ r[1] = pref.FieldNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ExtensionRange_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r, rawOptions
+}
+
+func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ fd.L0.ParentFile = pf
+ fd.L0.Parent = pd
+ fd.L0.Index = i
+
+ var rawTypeName []byte
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Number:
+ fd.L1.Number = pref.FieldNumber(v)
+ case fieldnum.FieldDescriptorProto_Label:
+ fd.L1.Cardinality = pref.Cardinality(v)
+ case fieldnum.FieldDescriptorProto_Type:
+ fd.L1.Kind = pref.Kind(v)
+ case fieldnum.FieldDescriptorProto_OneofIndex:
+ // In Message.unmarshalFull, we allocate slices for both
+ // the field and oneof descriptors before unmarshaling either
+ // of them. This ensures pointers to slice elements are stable.
+ od := &pd.(*Message).L2.Oneofs.List[v]
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ if fd.L1.ContainingOneof != nil {
+ panic("oneof type already set")
+ }
+ fd.L1.ContainingOneof = od
+ case fieldnum.FieldDescriptorProto_Proto3Optional:
+ fd.L1.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Name:
+ fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.FieldDescriptorProto_JsonName:
+ fd.L1.JSONName.Init(sb.MakeString(v))
+ case fieldnum.FieldDescriptorProto_DefaultValue:
+ fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
+ case fieldnum.FieldDescriptorProto_TypeName:
+ rawTypeName = v
+ case fieldnum.FieldDescriptorProto_Options:
+ fd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = PlaceholderMessage(name)
+ }
+ }
+ fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (fd *Field) unmarshalOptions(b []byte) {
+ const FieldOptions_EnforceUTF8 = 13
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldOptions_Packed:
+ fd.L1.HasPacked = true
+ fd.L1.IsPacked = protowire.DecodeBool(v)
+ case fieldnum.FieldOptions_Weak:
+ fd.L1.IsWeak = protowire.DecodeBool(v)
+ case FieldOptions_EnforceUTF8:
+ fd.L1.HasEnforceUTF8 = true
+ fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ od.L0.ParentFile = pf
+ od.L0.Parent = pd
+ od.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.OneofDescriptorProto_Name:
+ od.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.OneofDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions)
+}
+
+func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawTypeName []byte
+ var rawOptions []byte
+ xd.L2 = new(ExtensionL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Proto3Optional:
+ xd.L2.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_JsonName:
+ xd.L2.JSONName.Init(sb.MakeString(v))
+ case fieldnum.FieldDescriptorProto_DefaultValue:
+ xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
+ case fieldnum.FieldDescriptorProto_TypeName:
+ rawTypeName = v
+ case fieldnum.FieldDescriptorProto_Options:
+ xd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = PlaceholderMessage(name)
+ }
+ }
+ xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldOptions_Packed:
+ xd.L2.IsPacked = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawMethods [][]byte
+ var rawOptions []byte
+ sd.L2 = new(ServiceL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.ServiceDescriptorProto_Method:
+ rawMethods = append(rawMethods, v)
+ case fieldnum.ServiceDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawMethods) > 0 {
+ sd.L2.Methods.List = make([]Method, len(rawMethods))
+ for i, b := range rawMethods {
+ sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i)
+ }
+ }
+ sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
+}
+
+func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MethodDescriptorProto_ClientStreaming:
+ md.L1.IsStreamingClient = protowire.DecodeBool(v)
+ case fieldnum.MethodDescriptorProto_ServerStreaming:
+ md.L1.IsStreamingServer = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MethodDescriptorProto_Name:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.MethodDescriptorProto_InputType:
+ md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
+ case fieldnum.MethodDescriptorProto_OutputType:
+ md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
+ case fieldnum.MethodDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions)
+}
+
+// appendOptions appends src to dst, where the returned slice is never nil.
+// This is necessary to distinguish between empty and unpopulated options.
+func appendOptions(dst, src []byte) []byte {
+ if dst == nil {
+ dst = []byte{}
+ }
+ return append(dst, src...)
+}
+
+// optionsUnmarshaler constructs a lazy unmarshal function for an options message.
+//
+// The type of message to unmarshal to is passed as a pointer since the
+// vars in descopts may not yet be populated at the time this function is called.
+func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage {
+ if b == nil {
+ return nil
+ }
+ var opts pref.ProtoMessage
+ var once sync.Once
+ return func() pref.ProtoMessage {
+ once.Do(func() {
+ if *p == nil {
+ panic("Descriptor.Options called without importing the descriptor package")
+ }
+ opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage)
+ if err := (proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: db.TypeResolver,
+ }).Unmarshal(b, opts); err != nil {
+ panic(err)
+ }
+ })
+ return opts
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
new file mode 100644
index 000000000..1b7089b64
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -0,0 +1,286 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type FileImports []pref.FileImport
+
+func (p *FileImports) Len() int { return len(*p) }
+func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] }
+func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
+
+type Names struct {
+ List []pref.Name
+ once sync.Once
+ has map[pref.Name]int // protected by once
+}
+
+func (p *Names) Len() int { return len(p.List) }
+func (p *Names) Get(i int) pref.Name { return p.List[i] }
+func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 }
+func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Names) lazyInit() *Names {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.Name]int, len(p.List))
+ for _, s := range p.List {
+ p.has[s] = p.has[s] + 1
+ }
+ }
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of names, with an error message
+// that completes the sentence: "names is invalid because it has ..."
+func (p *Names) CheckValid() error {
+ for s, n := range p.lazyInit().has {
+ switch {
+ case n > 1:
+ return errors.New("duplicate name: %q", s)
+ case false && !s.IsValid():
+ // NOTE: The C++ implementation does not validate the identifier.
+ // See https://github.com/protocolbuffers/protobuf/issues/6335.
+ return errors.New("invalid name: %q", s)
+ }
+ }
+ return nil
+}
+
+type EnumRanges struct {
+ List [][2]pref.EnumNumber // start inclusive; end inclusive
+ once sync.Once
+ sorted [][2]pref.EnumNumber // protected by once
+}
+
+func (p *EnumRanges) Len() int { return len(p.List) }
+func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] }
+func (p *EnumRanges) Has(n pref.EnumNumber) bool {
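+	// Binary search over the ranges, which lazyInit sorts by start number.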
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := enumRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumRanges) lazyInit() *EnumRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges, with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *EnumRanges) CheckValid() error {
+ var rp enumRange
+ for i, r := range p.lazyInit().sorted {
+ r := enumRange(r)
+ switch {
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+type enumRange [2]protoreflect.EnumNumber
+
+func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive
+func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive
+func (r enumRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldRanges struct {
+ List [][2]pref.FieldNumber // start inclusive; end exclusive
+ once sync.Once
+ sorted [][2]pref.FieldNumber // protected by once
+}
+
+func (p *FieldRanges) Len() int { return len(p.List) }
+func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] }
+func (p *FieldRanges) Has(n pref.FieldNumber) bool {
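+	// Same binary search as EnumRanges.Has, over ranges sorted by start number.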
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := fieldRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *FieldRanges) lazyInit() *FieldRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges, with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *FieldRanges) CheckValid(isMessageSet bool) error {
+ var rp fieldRange
+ for i, r := range p.lazyInit().sorted {
+ r := fieldRange(r)
+ switch {
+ case !isValidFieldNumber(r.Start(), isMessageSet):
+ return errors.New("invalid field number: %d", r.Start())
+ case !isValidFieldNumber(r.End(), isMessageSet):
+ return errors.New("invalid field number: %d", r.End())
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+// isValidFieldNumber reports whether the field number is valid.
+// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
+// reserved number range.
+func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
+ if isMessageSet {
+ return protowire.MinValidNumber <= n && n <= math.MaxInt32
+ }
+ return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber
+}
+
+// CheckOverlap reports an error if p and q overlap.
+func (p *FieldRanges) CheckOverlap(q *FieldRanges) error {
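+	// Sweep both sorted lists in lockstep, advancing whichever range starts
+	// first; any pair that is not strictly ordered overlaps.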
+ rps := p.lazyInit().sorted
+ rqs := q.lazyInit().sorted
+ for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); {
+ rp := fieldRange(rps[pi])
+ rq := fieldRange(rqs[qi])
+ if !(rp.End() < rq.Start() || rq.End() < rp.Start()) {
+ return errors.New("overlapping ranges: %v with %v", rp, rq)
+ }
+ if rp.Start() < rq.Start() {
+ pi++
+ } else {
+ qi++
+ }
+ }
+ return nil
+}
+
+type fieldRange [2]protoreflect.FieldNumber
+
+func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive
+func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive
+func (r fieldRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldNumbers struct {
+ List []pref.FieldNumber
+ once sync.Once
+ has map[pref.FieldNumber]struct{} // protected by once
+}
+
+func (p *FieldNumbers) Len() int { return len(p.List) }
+func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] }
+func (p *FieldNumbers) Has(n pref.FieldNumber) bool {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.FieldNumber]struct{}, len(p.List))
+ for _, n := range p.List {
+ p.has[n] = struct{}{}
+ }
+ }
+ })
+ _, ok := p.has[n]
+ return ok
+}
+func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
+
+type OneofFields struct {
+ List []pref.FieldDescriptor
+ once sync.Once
+ byName map[pref.Name]pref.FieldDescriptor // protected by once
+ byJSON map[string]pref.FieldDescriptor // protected by once
+ byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
+}
+
+func (p *OneofFields) Len() int { return len(p.List) }
+func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
+func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
+func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
+func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
+func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
+
+func (p *OneofFields) lazyInit() *OneofFields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
+ p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
+ p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
+ for _, f := range p.List {
+ // Field names and numbers are guaranteed to be unique.
+ p.byName[f.Name()] = f
+ p.byJSON[f.JSONName()] = f
+ p.byNum[f.Number()] = f
+ }
+ }
+ })
+ return p
+}
+
+type SourceLocations struct {
+ List []pref.SourceLocation
+}
+
+func (p *SourceLocations) Len() int { return len(p.List) }
+func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] }
+func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
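The FieldRanges.Has method above binary-searches a lazily sorted list of [start, end+1) pairs. The following standalone sketch (hypothetical names, not part of the vendored package) shows the same idea on plain ints:

    package main

    import (
        "fmt"
        "sort"
    )

    // rangeSet mirrors the idea behind filedesc.FieldRanges: each entry is a
    // half-open pair [start, end) and Has binary-searches the sorted list.
    type rangeSet [][2]int

    func (rs rangeSet) sorted() rangeSet {
        s := append(rangeSet(nil), rs...)
        sort.Slice(s, func(i, j int) bool { return s[i][0] < s[j][0] })
        return s
    }

    func (rs rangeSet) Has(n int) bool {
        for ls := rs.sorted(); len(ls) > 0; {
            i := len(ls) / 2
            switch r := ls[i]; {
            case n < r[0]: // below this range: search the lower half
                ls = ls[:i]
            case n > r[1]-1: // above this range: search the upper half
                ls = ls[i+1:]
            default:
                return true
            }
        }
        return false
    }

    func main() {
        rs := rangeSet{{1, 6}, {10, 21}} // covers 1-5 and 10-20
        fmt.Println(rs.Has(5), rs.Has(6), rs.Has(20)) // true false true
    }
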
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
new file mode 100644
index 000000000..6a8825e80
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -0,0 +1,345 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package filedesc
+
+import (
+ "fmt"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type Enums struct {
+ List []Enum
+ once sync.Once
+ byName map[protoreflect.Name]*Enum // protected by once
+}
+
+func (p *Enums) Len() int {
+ return len(p.List)
+}
+func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
+ return &p.List[i]
+}
+func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Enums) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Enums) lazyInit() *Enums {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type EnumValues struct {
+ List []EnumValue
+ once sync.Once
+ byName map[protoreflect.Name]*EnumValue // protected by once
+ byNum map[protoreflect.EnumNumber]*EnumValue // protected by once
+}
+
+func (p *EnumValues) Len() int {
+ return len(p.List)
+}
+func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
+ return &p.List[i]
+}
+func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumValues) lazyInit() *EnumValues {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
+ p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Messages struct {
+ List []Message
+ once sync.Once
+ byName map[protoreflect.Name]*Message // protected by once
+}
+
+func (p *Messages) Len() int {
+ return len(p.List)
+}
+func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
+ return &p.List[i]
+}
+func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Messages) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Messages) lazyInit() *Messages {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Message, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Fields struct {
+ List []Field
+ once sync.Once
+ byName map[protoreflect.Name]*Field // protected by once
+ byJSON map[string]*Field // protected by once
+ byNum map[protoreflect.FieldNumber]*Field // protected by once
+}
+
+func (p *Fields) Len() int {
+ return len(p.List)
+}
+func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
+ return &p.List[i]
+}
+func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byJSON[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Fields) lazyInit() *Fields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Field, len(p.List))
+ p.byJSON = make(map[string]*Field, len(p.List))
+ p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byJSON[d.JSONName()]; !ok {
+ p.byJSON[d.JSONName()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Oneofs struct {
+ List []Oneof
+ once sync.Once
+ byName map[protoreflect.Name]*Oneof // protected by once
+}
+
+func (p *Oneofs) Len() int {
+ return len(p.List)
+}
+func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
+ return &p.List[i]
+}
+func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Oneofs) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Oneofs) lazyInit() *Oneofs {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Extensions struct {
+ List []Extension
+ once sync.Once
+ byName map[protoreflect.Name]*Extension // protected by once
+}
+
+func (p *Extensions) Len() int {
+ return len(p.List)
+}
+func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
+ return &p.List[i]
+}
+func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Extensions) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Extensions) lazyInit() *Extensions {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Services struct {
+ List []Service
+ once sync.Once
+ byName map[protoreflect.Name]*Service // protected by once
+}
+
+func (p *Services) Len() int {
+ return len(p.List)
+}
+func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
+ return &p.List[i]
+}
+func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Services) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Services) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Services) lazyInit() *Services {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Service, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Methods struct {
+ List []Method
+ once sync.Once
+ byName map[protoreflect.Name]*Method // protected by once
+}
+
+func (p *Methods) Len() int {
+ return len(p.List)
+}
+func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
+ return &p.List[i]
+}
+func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Methods) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Methods) lazyInit() *Methods {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Method, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
new file mode 100644
index 000000000..dbf2c605b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -0,0 +1,107 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+ emptyNames = new(Names)
+ emptyEnumRanges = new(EnumRanges)
+ emptyFieldRanges = new(FieldRanges)
+ emptyFieldNumbers = new(FieldNumbers)
+ emptySourceLocations = new(SourceLocations)
+
+ emptyFiles = new(FileImports)
+ emptyMessages = new(Messages)
+ emptyFields = new(Fields)
+ emptyOneofs = new(Oneofs)
+ emptyEnums = new(Enums)
+ emptyEnumValues = new(EnumValues)
+ emptyExtensions = new(Extensions)
+ emptyServices = new(Services)
+)
+
+// PlaceholderFile is a placeholder, representing only the file path.
+type PlaceholderFile string
+
+func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f }
+func (f PlaceholderFile) Parent() pref.Descriptor { return nil }
+func (f PlaceholderFile) Index() int { return 0 }
+func (f PlaceholderFile) Syntax() pref.Syntax { return 0 }
+func (f PlaceholderFile) Name() pref.Name { return "" }
+func (f PlaceholderFile) FullName() pref.FullName { return "" }
+func (f PlaceholderFile) IsPlaceholder() bool { return true }
+func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File }
+func (f PlaceholderFile) Path() string { return string(f) }
+func (f PlaceholderFile) Package() pref.FullName { return "" }
+func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles }
+func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages }
+func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums }
+func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices }
+func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations }
+func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return }
+func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnum is a placeholder, representing only the full name.
+type PlaceholderEnum pref.FullName
+
+func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnum) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnum) Index() int { return 0 }
+func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnum) IsPlaceholder() bool { return true }
+func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum }
+func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues }
+func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames }
+func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return }
+func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnumValue is a placeholder, representing only the full name.
+type PlaceholderEnumValue pref.FullName
+
+func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnumValue) Index() int { return 0 }
+func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
+func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue }
+func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 }
+func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return }
+func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderMessage is a placeholder, representing only the full name.
+type PlaceholderMessage pref.FullName
+
+func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil }
+func (m PlaceholderMessage) Parent() pref.Descriptor { return nil }
+func (m PlaceholderMessage) Index() int { return 0 }
+func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 }
+func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() }
+func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) }
+func (m PlaceholderMessage) IsPlaceholder() bool { return true }
+func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message }
+func (m PlaceholderMessage) IsMapEntry() bool { return false }
+func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields }
+func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs }
+func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames }
+func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers }
+func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") }
+func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages }
+func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums }
+func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return }
+func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
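Placeholder descriptors carry only a name or path, answer IsPlaceholder() with true, and return empty-but-usable defaults from every other accessor, so callers never have to nil-check unresolved dependencies. A rough illustration of the pattern outside the package, using stand-in types:

    package main

    import "fmt"

    // desc is a tiny stand-in for protoreflect.MessageDescriptor.
    type desc interface {
        FullName() string
        IsPlaceholder() bool
    }

    // placeholderMessage mirrors filedesc.PlaceholderMessage: the value is the
    // full name itself, and IsPlaceholder marks it as unresolved.
    type placeholderMessage string

    func (m placeholderMessage) FullName() string    { return string(m) }
    func (m placeholderMessage) IsPlaceholder() bool { return true }

    func main() {
        var d desc = placeholderMessage("example.Unresolved")
        if d.IsPlaceholder() {
            fmt.Println("dependency", d.FullName(), "was not linked in")
        }
    }
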
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
new file mode 100644
index 000000000..0a0dd35de
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -0,0 +1,297 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filetype provides functionality for wrapping descriptors
+// with Go type information.
+package filetype
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ pimpl "google.golang.org/protobuf/internal/impl"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder constructs type descriptors from a raw file descriptor
+// and associated Go types for each enum and message declaration.
+//
+//
+// Flattened Ordering
+//
+// The protobuf type system represents declarations as a tree. Certain nodes in
+// the tree require us to either associate it with a concrete Go type or to
+// resolve a dependency, which is information that must be provided separately
+// since it cannot be derived from the file descriptor alone.
+//
+// However, representing a tree as Go literals is difficult to simply do in a
+// space and time efficient way. Thus, we store them as a flattened list of
+// objects where the serialization order from the tree-based form is important.
+//
+// The "flattened ordering" is defined as a tree traversal of all enum, message,
+// extension, and service declarations using the following algorithm:
+//
+// def VisitFileDecls(fd):
+// for e in fd.Enums: yield e
+// for m in fd.Messages: yield m
+// for x in fd.Extensions: yield x
+// for s in fd.Services: yield s
+// for m in fd.Messages: yield from VisitMessageDecls(m)
+//
+// def VisitMessageDecls(md):
+// for e in md.Enums: yield e
+// for m in md.Messages: yield m
+// for x in md.Extensions: yield x
+// for m in md.Messages: yield from VisitMessageDecls(m)
+//
+// The traversal starts at the root file descriptor and yields each direct
+// declaration within each node before traversing into sub-declarations
+// that children themselves may have.
+type Builder struct {
+ // File is the underlying file descriptor builder.
+ File fdesc.Builder
+
+ // GoTypes is a unique set of the Go types for all declarations and
+ // dependencies. Each type is represented as a zero value of the Go type.
+ //
+ // Declarations are Go types generated for enums and messages directly
+ // declared (not publicly imported) in the proto source file.
+ // Messages for map entries are accounted for, but represented by nil.
+ // Enum declarations in "flattened ordering" come first, followed by
+ // message declarations in "flattened ordering".
+ //
+ // Dependencies are Go types for enums or messages referenced by
+ // message fields (excluding weak fields), for parent extended messages of
+ // extension fields, for enums or messages referenced by extension fields,
+ // and for input and output messages referenced by service methods.
+ // Dependencies must come after declarations, but the ordering of
+ // dependencies themselves is unspecified.
+ GoTypes []interface{}
+
+ // DependencyIndexes is an ordered list of indexes into GoTypes for the
+ // dependencies of messages, extensions, or services.
+ //
+ // There are 5 sub-lists in "flattened ordering" concatenated back-to-back:
+ // 0. Message field dependencies: list of the enum or message type
+ // referred to by every message field.
+ // 1. Extension field targets: list of the extended parent message of
+ // every extension.
+ // 2. Extension field dependencies: list of the enum or message type
+ // referred to by every extension field.
+ // 3. Service method inputs: list of the input message type
+ // referred to by every service method.
+ // 4. Service method outputs: list of the output message type
+ // referred to by every service method.
+ //
+ // The offset into DependencyIndexes for the start of each sub-list
+ // is appended to the end in reverse order.
+ DependencyIndexes []int32
+
+ // EnumInfos is a list of enum infos in "flattened ordering".
+ EnumInfos []pimpl.EnumInfo
+
+ // MessageInfos is a list of message infos in "flattened ordering".
+ // If provided, the GoType and PBType for each element is populated.
+ //
+ // Requirement: len(MessageInfos) == len(Build.Messages)
+ MessageInfos []pimpl.MessageInfo
+
+ // ExtensionInfos is a list of extension infos in "flattened ordering".
+ // Each element is initialized and registered with the protoregistry package.
+ //
+ // Requirement: len(LegacyExtensions) == len(Build.Extensions)
+ ExtensionInfos []pimpl.ExtensionInfo
+
+ // TypeRegistry is the registry to register each type descriptor.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeRegistry interface {
+ RegisterMessage(pref.MessageType) error
+ RegisterEnum(pref.EnumType) error
+ RegisterExtension(pref.ExtensionType) error
+ }
+}
+
+// Out is the output of the builder.
+type Out struct {
+ File pref.FileDescriptor
+}
+
+func (tb Builder) Build() (out Out) {
+ // Replace the resolver with one that resolves dependencies by index,
+ // which is faster and more reliable than relying on the global registry.
+ if tb.File.FileRegistry == nil {
+ tb.File.FileRegistry = preg.GlobalFiles
+ }
+ tb.File.FileRegistry = &resolverByIndex{
+ goTypes: tb.GoTypes,
+ depIdxs: tb.DependencyIndexes,
+ fileRegistry: tb.File.FileRegistry,
+ }
+
+ // Initialize registry if unpopulated.
+ if tb.TypeRegistry == nil {
+ tb.TypeRegistry = preg.GlobalTypes
+ }
+
+ fbOut := tb.File.Build()
+ out.File = fbOut.File
+
+ // Process enums.
+ enumGoTypes := tb.GoTypes[:len(fbOut.Enums)]
+ if len(tb.EnumInfos) != len(fbOut.Enums) {
+ panic("mismatching enum lengths")
+ }
+ if len(fbOut.Enums) > 0 {
+ for i := range fbOut.Enums {
+ tb.EnumInfos[i] = pimpl.EnumInfo{
+ GoReflectType: reflect.TypeOf(enumGoTypes[i]),
+ Desc: &fbOut.Enums[i],
+ }
+ // Register enum types.
+ if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // Process messages.
+ messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)]
+ if len(tb.MessageInfos) != len(fbOut.Messages) {
+ panic("mismatching message lengths")
+ }
+ if len(fbOut.Messages) > 0 {
+ for i := range fbOut.Messages {
+ if messageGoTypes[i] == nil {
+ continue // skip map entry
+ }
+
+ tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i])
+ tb.MessageInfos[i].Desc = &fbOut.Messages[i]
+
+ // Register message types.
+ if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ // As a special-case for descriptor.proto,
+ // locally register concrete message type for the options.
+ if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" {
+ for i := range fbOut.Messages {
+ switch fbOut.Messages[i].Name() {
+ case "FileOptions":
+ descopts.File = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumOptions":
+ descopts.Enum = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumValueOptions":
+ descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage)
+ case "MessageOptions":
+ descopts.Message = messageGoTypes[i].(pref.ProtoMessage)
+ case "FieldOptions":
+ descopts.Field = messageGoTypes[i].(pref.ProtoMessage)
+ case "OneofOptions":
+ descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage)
+ case "ExtensionRangeOptions":
+ descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage)
+ case "ServiceOptions":
+ descopts.Service = messageGoTypes[i].(pref.ProtoMessage)
+ case "MethodOptions":
+ descopts.Method = messageGoTypes[i].(pref.ProtoMessage)
+ }
+ }
+ }
+ }
+
+ // Process extensions.
+ if len(tb.ExtensionInfos) != len(fbOut.Extensions) {
+ panic("mismatching extension lengths")
+ }
+ var depIdx int32
+ for i := range fbOut.Extensions {
+ // For enum and message kinds, determine the referent Go type so
+ // that we can construct their constructors.
+ const listExtDeps = 2
+ var goType reflect.Type
+ switch fbOut.Extensions[i].L1.Kind {
+ case pref.EnumKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ default:
+ goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind]
+ }
+ if fbOut.Extensions[i].IsList() {
+ goType = reflect.SliceOf(goType)
+ }
+
+ pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType)
+
+ // Register extension types.
+ if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ return out
+}
+
+var goTypeForPBKind = map[pref.Kind]reflect.Type{
+ pref.BoolKind: reflect.TypeOf(bool(false)),
+ pref.Int32Kind: reflect.TypeOf(int32(0)),
+ pref.Sint32Kind: reflect.TypeOf(int32(0)),
+ pref.Sfixed32Kind: reflect.TypeOf(int32(0)),
+ pref.Int64Kind: reflect.TypeOf(int64(0)),
+ pref.Sint64Kind: reflect.TypeOf(int64(0)),
+ pref.Sfixed64Kind: reflect.TypeOf(int64(0)),
+ pref.Uint32Kind: reflect.TypeOf(uint32(0)),
+ pref.Fixed32Kind: reflect.TypeOf(uint32(0)),
+ pref.Uint64Kind: reflect.TypeOf(uint64(0)),
+ pref.Fixed64Kind: reflect.TypeOf(uint64(0)),
+ pref.FloatKind: reflect.TypeOf(float32(0)),
+ pref.DoubleKind: reflect.TypeOf(float64(0)),
+ pref.StringKind: reflect.TypeOf(string("")),
+ pref.BytesKind: reflect.TypeOf([]byte(nil)),
+}
+
+type depIdxs []int32
+
+// Get retrieves the jth element of the ith sub-list.
+func (x depIdxs) Get(i, j int32) int32 {
+ return x[x[int32(len(x))-i-1]+j]
+}
+
+type (
+ resolverByIndex struct {
+ goTypes []interface{}
+ depIdxs depIdxs
+ fileRegistry
+ }
+ fileRegistry interface {
+ FindFileByPath(string) (pref.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+)
+
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &es[depIdx]
+ } else {
+ return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
+ }
+}
+
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &ms[depIdx-len(es)]
+ } else {
+ return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
+ }
+}
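The depIdxs.Get helper above relies on the layout documented for Builder.DependencyIndexes: the five dependency sub-lists are concatenated back-to-back, and the starting offset of each sub-list is appended at the end in reverse order. A small standalone sketch with a hypothetical layout:

    package main

    import "fmt"

    // depIdxs mirrors filetype.depIdxs: sub-list data first, then the starting
    // offset of each sub-list appended in reverse sub-list order.
    type depIdxs []int32

    func (x depIdxs) Get(i, j int32) int32 {
        return x[x[int32(len(x))-i-1]+j]
    }

    func main() {
        // Hypothetical layout with three sub-lists:
        //   sub-list 0: [7 8]  (starts at offset 0)
        //   sub-list 1: [9]    (starts at offset 2)
        //   sub-list 2: [10]   (starts at offset 3)
        // Offsets appended in reverse sub-list order: 3, 2, 0.
        x := depIdxs{7, 8, 9, 10, 3, 2, 0}
        fmt.Println(x.Get(0, 1), x.Get(1, 0), x.Get(2, 0)) // 8 9 10
    }
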
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
new file mode 100644
index 000000000..58372dd34
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags provides a set of flags controlled by build tags.
+package flags
+
+// ProtoLegacy specifies whether to enable support for legacy functionality
+// such as MessageSets, weak fields, and various other obscure behavior
+// that is necessary to maintain backwards compatibility with proto1 or
+// the pre-release variants of proto2 and proto3.
+//
+// This is disabled by default unless built with the "protolegacy" tag.
+//
+// WARNING: The compatibility agreement covers nothing provided by this flag.
+// As such, functionality may suddenly be removed or changed at our discretion.
+const ProtoLegacy = protoLegacy
+
+// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions.
+//
+// Lazy extension unmarshaling validates the contents of message-valued
+// extension fields at unmarshal time, but defers creating the message
+// structure until the extension is first accessed.
+const LazyUnmarshalExtensions = ProtoLegacy
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
new file mode 100644
index 000000000..a72995f02
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !protolegacy
+
+package flags
+
+const protoLegacy = false
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
new file mode 100644
index 000000000..772e2f0e4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build protolegacy
+
+package flags
+
+const protoLegacy = true
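The two files above select the value of the unexported protoLegacy constant via build tags, so flags.ProtoLegacy defaults to false and flips to true only when the module is compiled with the protolegacy tag, for example:

    go build -tags protolegacy ./...
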
diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go
new file mode 100644
index 000000000..f45509fbd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genname/name.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package genname contains constants for generated names.
+package genname
+
+const (
+ State = "state"
+
+ SizeCache = "sizeCache"
+ SizeCacheA = "XXX_sizecache"
+
+ WeakFields = "weakFields"
+ WeakFieldsA = "XXX_weak"
+
+ UnknownFields = "unknownFields"
+ UnknownFieldsA = "XXX_unrecognized"
+
+ ExtensionFields = "extensionFields"
+ ExtensionFieldsA = "XXX_InternalExtensions"
+ ExtensionFieldsB = "XXX_extensions"
+
+ WeakFieldPrefix = "XXX_weak_"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
new file mode 100644
index 000000000..4d22c9604
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -0,0 +1,170 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Export is a zero-length named type that exists only to export a set of
+// functions that we do not want to appear in godoc.
+type Export struct{}
+
+// enum is any enum type generated by protoc-gen-go
+// and must be a named int32 type.
+type enum = interface{}
+
+// EnumOf returns the protoreflect.Enum interface over e.
+// It returns nil if e is nil.
+func (Export) EnumOf(e enum) pref.Enum {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e
+ default:
+ return legacyWrapEnum(reflect.ValueOf(e))
+ }
+}
+
+// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e.
+// It returns nil if e is nil.
+func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Descriptor()
+ default:
+ return LegacyLoadEnumDesc(reflect.TypeOf(e))
+ }
+}
+
+// EnumTypeOf returns the protoreflect.EnumType for e.
+// It returns nil if e is nil.
+func (Export) EnumTypeOf(e enum) pref.EnumType {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Type()
+ default:
+ return legacyLoadEnumType(reflect.TypeOf(e))
+ }
+}
+
+// EnumStringOf returns the enum value as a string, either as the name if
+// the number is resolvable, or the number formatted as a string.
+func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string {
+ ev := ed.Values().ByNumber(n)
+ if ev != nil {
+ return string(ev.Name())
+ }
+ return strconv.Itoa(int(n))
+}
+
+// message is any message type generated by protoc-gen-go
+// and must be a pointer to a named struct type.
+type message = interface{}
+
+// legacyMessageWrapper wraps a v2 message as a v1 message.
+type legacyMessageWrapper struct{ m pref.ProtoMessage }
+
+func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) }
+func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) }
+func (m legacyMessageWrapper) ProtoMessage() {}
+
+// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV1Of(m message) piface.MessageV1 {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case piface.MessageV1:
+ return mv
+ case unwrapper:
+ return Export{}.ProtoMessageV1Of(mv.protoUnwrap())
+ case pref.ProtoMessage:
+ return legacyMessageWrapper{mv}
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+func (Export) protoMessageV2Of(m message) pref.ProtoMessage {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case pref.ProtoMessage:
+ return mv
+ case legacyMessageWrapper:
+ return mv.m
+ case piface.MessageV1:
+ return nil
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv
+ }
+ return legacyWrapMessage(reflect.ValueOf(m)).Interface()
+}
+
+// MessageOf returns the protoreflect.Message interface over m.
+// It returns nil if m is nil.
+func (Export) MessageOf(m message) pref.Message {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect()
+ }
+ return legacyWrapMessage(reflect.ValueOf(m))
+}
+
+// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m.
+// It returns nil if m is nil.
+func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Descriptor()
+ }
+ return LegacyLoadMessageDesc(reflect.TypeOf(m))
+}
+
+// MessageTypeOf returns the protoreflect.MessageType for m.
+// It returns nil if m is nil.
+func (Export) MessageTypeOf(m message) pref.MessageType {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageInfo(reflect.TypeOf(m), "")
+}
+
+// MessageStringOf returns the message value as a string,
+// which is the message serialized in the protobuf text format.
+func (Export) MessageStringOf(m pref.ProtoMessage) string {
+ return prototext.MarshalOptions{Multiline: false}.Format(m)
+}
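Export.EnumStringOf above falls back to the decimal number when a value has no registered name. The same behavior, distilled into a self-contained sketch with a plain map standing in for the enum descriptor:

    package main

    import (
        "fmt"
        "strconv"
    )

    // enumString mirrors Export.EnumStringOf: return the registered name when
    // the number resolves, otherwise the number formatted as a string.
    func enumString(names map[int32]string, n int32) string {
        if s, ok := names[n]; ok {
            return s
        }
        return strconv.Itoa(int(n))
    }

    func main() {
        names := map[int32]string{0: "UNKNOWN", 1: "STARTED"}
        fmt.Println(enumString(names, 1), enumString(names, 7)) // STARTED 7
    }
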
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
new file mode 100644
index 000000000..b82341e57
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -0,0 +1,141 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p)
+}
+
+func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ if p.IsNil() {
+ for _, f := range mi.orderedCoderFields {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ }
+ return nil
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ if err := mi.isInitExtensions(e); err != nil {
+ return err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if !f.isRequired && f.funcs.isInit == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ continue
+ }
+ if f.funcs.isInit == nil {
+ continue
+ }
+ if err := f.funcs.isInit(fptr, f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
+ if ext == nil {
+ return nil
+ }
+ for _, x := range *ext {
+ ei := getExtensionFieldInfo(x.Type())
+ if ei.funcs.isInit == nil {
+ continue
+ }
+ v := x.Value()
+ if !v.IsValid() {
+ continue
+ }
+ if err := ei.funcs.isInit(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var (
+ needsInitCheckMu sync.Mutex
+ needsInitCheckMap sync.Map
+)
+
+// needsInitCheck reports whether a message needs to be checked for partial initialization.
+//
+// It returns true if the message transitively includes any required or extension fields.
+func needsInitCheck(md pref.MessageDescriptor) bool {
+ if v, ok := needsInitCheckMap.Load(md); ok {
+ if has, ok := v.(bool); ok {
+ return has
+ }
+ }
+ needsInitCheckMu.Lock()
+ defer needsInitCheckMu.Unlock()
+ return needsInitCheckLocked(md)
+}
+
+func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) {
+ if v, ok := needsInitCheckMap.Load(md); ok {
+ // If has is true, we've previously determined that this message
+ // needs init checks.
+ //
+ // If has is false, we've previously determined that it can never
+ // be uninitialized.
+ //
+ // If has is not a bool, we've just encountered a cycle in the
+ // message graph. In this case, it is safe to return false: If
+ // the message does have required fields, we'll detect them later
+ // in the graph traversal.
+ has, ok := v.(bool)
+ return ok && has
+ }
+ needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message
+ defer func() {
+ needsInitCheckMap.Store(md, has)
+ }()
+ if md.RequiredNumbers().Len() > 0 {
+ return true
+ }
+ if md.ExtensionRanges().Len() > 0 {
+ return true
+ }
+ for i := 0; i < md.Fields().Len(); i++ {
+ fd := md.Fields().Get(i)
+ // Map keys are never messages, so just consider the map value.
+ if fd.IsMap() {
+ fd = fd.MapValue()
+ }
+ fmd := fd.Message()
+ if fmd != nil && needsInitCheckLocked(fmd) {
+ return true
+ }
+ }
+ return false
+}
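needsInitCheckLocked above memoizes its answer per descriptor and stores a non-bool sentinel while a message is still being visited, so cycles in the message graph terminate and resolve to false. A standalone sketch of that cycle-tolerant traversal over a toy graph:

    package main

    import "fmt"

    type node struct {
        required bool
        children []*node
    }

    // hasRequired mirrors the needsInitCheck traversal: a non-bool sentinel
    // marks nodes currently being visited, so a cycle reads as false and the
    // real answer is cached once the node's traversal finishes.
    func hasRequired(n *node, memo map[*node]interface{}) (has bool) {
        if v, ok := memo[n]; ok {
            b, ok := v.(bool)
            return ok && b // in-progress sentinel counts as false
        }
        memo[n] = struct{}{} // sentinel: currently visiting
        defer func() { memo[n] = has }()
        if n.required {
            return true
        }
        for _, c := range n.children {
            if hasRequired(c, memo) {
                return true
            }
        }
        return false
    }

    func main() {
        a := &node{}
        b := &node{required: true}
        a.children = []*node{a, b} // self-cycle plus a required child
        fmt.Println(hasRequired(a, map[*node]interface{}{})) // true
    }
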
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
new file mode 100644
index 000000000..08d35170b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -0,0 +1,223 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type extensionFieldInfo struct {
+ wiretag uint64
+ tagsize int
+ unmarshalNeedsValue bool
+ funcs valueCoderFuncs
+ validation validationInfo
+}
+
+var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo
+
+func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+ if xi, ok := xt.(*ExtensionInfo); ok {
+ xi.lazyInit()
+ return xi.info
+ }
+ return legacyLoadExtensionFieldInfo(xt)
+}
+
+// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt.
+func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+ if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok {
+ return xi.(*extensionFieldInfo)
+ }
+ e := makeExtensionFieldInfo(xt.TypeDescriptor())
+ if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok {
+ return e.(*extensionFieldInfo)
+ }
+ return e
+}
+
+func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo {
+ var wiretag uint64
+ if !xd.IsPacked() {
+ wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType)
+ }
+ e := &extensionFieldInfo{
+ wiretag: wiretag,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: encoderFuncsForValue(xd),
+ }
+ // Does the unmarshal function need a value passed to it?
+ // This is true for composite types, where we pass in a message, list, or map to fill in,
+ // and for enums, where we pass in a prototype value to specify the concrete enum type.
+ switch xd.Kind() {
+ case pref.MessageKind, pref.GroupKind, pref.EnumKind:
+ e.unmarshalNeedsValue = true
+ default:
+ if xd.Cardinality() == pref.Repeated {
+ e.unmarshalNeedsValue = true
+ }
+ }
+ return e
+}
+
+type lazyExtensionValue struct {
+ atomicOnce uint32 // atomically set if value is valid
+ mu sync.Mutex
+ xi *extensionFieldInfo
+ value pref.Value
+ b []byte
+ fn func() pref.Value
+}
+
+type ExtensionField struct {
+ typ pref.ExtensionType
+
+ // value is either the value of GetValue,
+ // or a *lazyExtensionValue that then returns the value of GetValue.
+ value pref.Value
+ lazy *lazyExtensionValue
+}
+
+func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) {
+ if f.lazy == nil {
+ f.lazy = &lazyExtensionValue{xi: xi}
+ }
+ f.typ = xt
+ f.lazy.xi = xi
+ f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp)
+ f.lazy.b = append(f.lazy.b, b...)
+}
+
+func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool {
+ if f.typ == nil {
+ return true
+ }
+ if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+ return true
+ }
+ return false
+}
+
+func (f *ExtensionField) lazyInit() {
+ f.lazy.mu.Lock()
+ defer f.lazy.mu.Unlock()
+ if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 {
+ return
+ }
+ if f.lazy.xi != nil {
+ b := f.lazy.b
+ val := f.typ.New()
+ for len(b) > 0 {
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ panic(errors.New("bad tag in lazy extension decoding"))
+ }
+ b = b[n:]
+ }
+ num := protowire.Number(tag >> 3)
+ wtyp := protowire.Type(tag & 7)
+ var out unmarshalOutput
+ var err error
+ val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions)
+ if err != nil {
+ panic(errors.New("decode failure in lazy extension decoding: %v", err))
+ }
+ b = b[out.n:]
+ }
+ f.lazy.value = val
+ } else {
+ f.lazy.value = f.lazy.fn()
+ }
+ f.lazy.xi = nil
+ f.lazy.fn = nil
+ f.lazy.b = nil
+ atomic.StoreUint32(&f.lazy.atomicOnce, 1)
+}
+
+// Set sets the type and value of the extension field.
+// This must not be called concurrently.
+func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) {
+ f.typ = t
+ f.value = v
+ f.lazy = nil
+}
+
+// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
+// This must not be called concurrently.
+func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) {
+ f.typ = t
+ f.lazy = &lazyExtensionValue{fn: fn}
+}
+
+// Value returns the value of the extension field.
+// This may be called concurrently.
+func (f *ExtensionField) Value() pref.Value {
+ if f.lazy != nil {
+ if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+ f.lazyInit()
+ }
+ return f.lazy.value
+ }
+ return f.value
+}
+
+// Type returns the type of the extension field.
+// This may be called concurrently.
+func (f ExtensionField) Type() pref.ExtensionType {
+ return f.typ
+}
+
+// IsSet returns whether the extension field is set.
+// This may be called concurrently.
+func (f ExtensionField) IsSet() bool {
+ return f.typ != nil
+}
+
+// IsLazy reports whether a field is lazily encoded.
+// It is exported for testing.
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool {
+ var mi *MessageInfo
+ var p pointer
+ switch m := m.(type) {
+ case *messageState:
+ mi = m.messageInfo()
+ p = m.pointer()
+ case *messageReflectWrapper:
+ mi = m.messageInfo()
+ p = m.pointer()
+ default:
+ return false
+ }
+ xd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ return false
+ }
+ xt := xd.Type()
+ ext := mi.extensionMap(p)
+ if ext == nil {
+ return false
+ }
+ f, ok := (*ext)[int32(fd.Number())]
+ if !ok {
+ return false
+ }
+ return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
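lazyExtensionValue above combines an atomic "done" flag with a mutex: readers take the fast path once the flag is set, while the slow path decodes the buffered bytes (or calls fn) exactly once under the lock. A reduced standalone sketch of the same double-checked pattern:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // lazyValue computes its value once; the atomic flag keeps later reads
    // off the mutex, as in lazyExtensionValue.
    type lazyValue struct {
        done  uint32
        mu    sync.Mutex
        fn    func() int
        value int
    }

    func (l *lazyValue) Get() int {
        if atomic.LoadUint32(&l.done) == 0 {
            l.init()
        }
        return l.value
    }

    func (l *lazyValue) init() {
        l.mu.Lock()
        defer l.mu.Unlock()
        if atomic.LoadUint32(&l.done) == 1 {
            return // another goroutine finished first
        }
        l.value = l.fn()
        l.fn = nil
        atomic.StoreUint32(&l.done, 1)
    }

    func main() {
        l := &lazyValue{fn: func() int { fmt.Println("computed once"); return 42 }}
        fmt.Println(l.Get(), l.Get()) // computes once, prints 42 42
    }
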
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
new file mode 100644
index 000000000..c00744d38
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -0,0 +1,828 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type errInvalidUTF8 struct{}
+
+func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" }
+func (errInvalidUTF8) InvalidUTF8() bool { return true }
+
+// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof.
+//
+// For size, marshal, and isInit operations, functions are set only on the first field
+// in the oneof. The functions are called when the oneof is non-nil, and will dispatch
+// to the appropriate field-specific function as necessary.
+//
+// The unmarshal function is set on each field individually as usual.
+func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) {
+ fs := si.oneofsByName[od.Name()]
+ ft := fs.Type
+ oneofFields := make(map[reflect.Type]*coderFieldInfo)
+ needIsInit := false
+ fields := od.Fields()
+ for i, lim := 0, fields.Len(); i < lim; i++ {
+ fd := od.Fields().Get(i)
+ num := fd.Number()
+ // Make a copy of the original coderFieldInfo for use in unmarshaling.
+ //
+ // oneofFields[oneofType].funcs.marshal is the field-specific marshal function.
+ //
+ // mi.coderFields[num].marshal is set on only the first field in the oneof,
+ // and dispatches to the field-specific marshaler in oneofFields.
+ cf := *mi.coderFields[num]
+ ot := si.oneofWrappersByNumber[num]
+ cf.ft = ot.Field(0).Type
+ cf.mi, cf.funcs = fieldCoder(fd, cf.ft)
+ oneofFields[ot] = &cf
+ if cf.funcs.isInit != nil {
+ needIsInit = true
+ }
+ mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ var vw reflect.Value // pointer to wrapper type
+ vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind
+ if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot {
+ vw = vi.Elem()
+ } else {
+ vw = reflect.New(ot)
+ }
+ out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts)
+ if err != nil {
+ return out, err
+ }
+ vi.Set(vw)
+ return out, nil
+ }
+ }
+ getInfo := func(p pointer) (pointer, *coderFieldInfo) {
+ v := p.AsValueOf(ft).Elem()
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ v = v.Elem() // interface -> *struct
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()]
+ }
+ first := mi.coderFields[od.Fields().Get(0).Number()]
+ first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.size == nil {
+ return 0
+ }
+ return info.funcs.size(p, info, opts)
+ }
+ first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.marshal == nil {
+ return b, nil
+ }
+ return info.funcs.marshal(b, p, info, opts)
+ }
+ first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) {
+ srcp, srcinfo := getInfo(src)
+ if srcinfo == nil || srcinfo.funcs.merge == nil {
+ return
+ }
+ dstp, dstinfo := getInfo(dst)
+ if dstinfo != srcinfo {
+ dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type()))
+ dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset)
+ }
+ srcinfo.funcs.merge(dstp, srcp, srcinfo, opts)
+ }
+ if needIsInit {
+ first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.isInit == nil {
+ return nil
+ }
+ return info.funcs.isInit(p, info)
+ }
+ }
+}
+
+func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs {
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ })
+ }
+
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return 0
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return b, nil
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ fs := p.WeakFields()
+ m, ok := fs.get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ return unmarshalOutput{}, errUnknown
+ }
+ m = messageType.New().Interface()
+ fs.set(f.num, m)
+ }
+ return consumeMessage(b, m, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+ },
+ merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ sm, ok := src.WeakFields().get(f.num)
+ if !ok {
+ return
+ }
+ dm, ok := dst.WeakFields().get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ dm = messageType.New().Interface()
+ dst.WeakFields().set(f.num, dm)
+ }
+ opts.Merge(dm, sm)
+ },
+ }
+}
+
+func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageInfo,
+ marshal: appendMessageInfo,
+ unmarshal: consumeMessageInfo,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeMessage(b, asMessage(mp), wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
+func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize
+}
+
+func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
+ return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+}
+
+func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts)
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
+ return f.mi.checkInitializedPointer(p.Elem())
+}
+
+func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
+ return protowire.SizeBytes(proto.Size(m)) + tagsize
+}
+
+func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(proto.Size(m)))
+ return opts.Options().MarshalAppend(b, m)
+}
+
+func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeMessage(m, tagsize, opts)
+}
+
+func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendMessage(b, m, wiretag, opts)
+}
+
+func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeMessage(b, m, wtyp, opts)
+ return v, out, err
+}
+
+func isInitMessageValue(v pref.Value) error {
+ m := v.Message().Interface()
+ return proto.CheckInitialized(m)
+}
+
+var coderMessageValue = valueCoderFuncs{
+ size: sizeMessageValue,
+ marshal: appendMessageValue,
+ unmarshal: consumeMessageValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeGroup(m, tagsize, opts)
+}
+
+func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendGroup(b, m, wiretag, opts)
+}
+
+func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeGroup(b, m, num, wtyp, opts)
+ return v, out, err
+}
+
+var coderGroupValue = valueCoderFuncs{
+ size: sizeGroupValue,
+ marshal: appendGroupValue,
+ unmarshal: consumeGroupValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupType,
+ marshal: appendGroupType,
+ unmarshal: consumeGroupType,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeGroup(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendGroup(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeGroup(b, asMessage(mp), num, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
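+// sizeGroupType reports the encoded size of a group field: the message size
+// plus two tags (start group and end group).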
+func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts)
+}
+
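+// appendGroupType encodes a group field by bracketing the message between a
+// start-group tag and an end-group tag. The end-group tag is f.wiretag+1
+// because the end-group wire type immediately follows the start-group type.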
+func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
+}
+
+func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
+ return 2*tagsize + proto.Size(m)
+}
+
+func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err := opts.Options().MarshalAppend(b, m)
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
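+// makeMessageSliceFieldCoder returns the pointer coder funcs for a repeated
+// message field, preferring the MessageInfo fast path and falling back to
+// reflection when no MessageInfo is available for the element type.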
+func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageSliceInfo,
+ marshal: appendMessageSliceInfo,
+ unmarshal: consumeMessageSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMessageSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMessageSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeMessageSlice(b, p, ft, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+ }
+ return n
+}
+
+func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ siz := f.mi.sizePointer(v, opts)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ if err := f.mi.checkInitializedPointer(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func isInitMessageSlice(p pointer, goType reflect.Type) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Slices of messages
+
+func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return pref.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return pref.Value{}, out, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+func isInitMessageSliceValue(listv pref.Value) error {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var coderMessageSliceValue = valueCoderFuncs{
+ size: sizeMessageSliceValue,
+ marshal: appendMessageSliceValue,
+ unmarshal: consumeMessageSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
+func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag) // start group
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.StartGroupType {
+ return pref.Value{}, out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return pref.Value{}, out, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+var coderGroupSliceValue = valueCoderFuncs{
+ size: sizeGroupSliceValue,
+ marshal: appendGroupSliceValue,
+ unmarshal: consumeGroupSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
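+// makeGroupSliceFieldCoder returns the pointer coder funcs for a repeated
+// group field, mirroring makeGroupFieldCoder: a MessageInfo fast path when
+// available, a reflection-based fallback otherwise.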
+func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupSliceInfo,
+ marshal: appendGroupSliceInfo,
+ unmarshal: consumeGroupSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeGroupSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendGroupSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeGroupSlice(b, p, num, wtyp, ft, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+ }
+ return n
+}
+
+func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ if wtyp != protowire.StartGroupType {
+ return unmarshalOutput{}, errUnknown
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ out, err := f.mi.unmarshalPointer(b, mp, f.num, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ return out, nil
+}
+
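+// asMessage converts a reflect.Value holding a message pointer into a
+// protoreflect.ProtoMessage, wrapping legacy message types that do not
+// implement the interface directly.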
+func asMessage(v reflect.Value) pref.ProtoMessage {
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return m
+ }
+ return legacyWrapMessage(v).Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
new file mode 100644
index 000000000..ff198d0a1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -0,0 +1,5637 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// sizeBool returns the size of wire encoding a bool pointer as a Bool.
+func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bool()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBool wire encodes a bool pointer as a Bool.
+func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBool wire decodes a bool pointer as a Bool.
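+// The one- and two-byte varint cases are decoded inline as a fast path;
+// longer encodings fall back to protowire.ConsumeVarint.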
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bool() = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBool = pointerCoderFuncs{
+ size: sizeBool,
+ marshal: appendBool,
+ unmarshal: consumeBool,
+ merge: mergeBool,
+}
+
+// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool.
+// The zero value is not encoded.
+func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bool()
+ if v == false {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolNoZero wire encodes a bool pointer as a Bool.
+// The zero value is not encoded.
+func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ if v == false {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+var coderBoolNoZero = pointerCoderFuncs{
+ size: sizeBoolNoZero,
+ marshal: appendBoolNoZero,
+ unmarshal: consumeBool,
+ merge: mergeBoolNoZero,
+}
+
+// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.BoolPtr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolPtr wire encodes a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.BoolPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBoolPtr wire decodes a *bool pointer as a Bool.
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.BoolPtr()
+ if *vp == nil {
+ *vp = new(bool)
+ }
+ **vp = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBoolPtr = pointerCoderFuncs{
+ size: sizeBoolPtr,
+ marshal: appendBoolPtr,
+ unmarshal: consumeBoolPtr,
+ merge: mergeBoolPtr,
+}
+
+// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool.
+func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return size
+}
+
+// appendBoolSlice encodes a []bool pointer as a repeated Bool.
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool.
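+// Both packed (length-delimited) and unpacked (one varint per element)
+// encodings are accepted, as required for repeated scalar fields.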
+func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BoolSlice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, protowire.DecodeBool(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, protowire.DecodeBool(v))
+ out.n = n
+ return out, nil
+}
+
+var coderBoolSlice = pointerCoderFuncs{
+ size: sizeBoolSlice,
+ marshal: appendBoolSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
+// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool.
+func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool.
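+// The payload size is computed up front so the elements can be written as a
+// single length-delimited record.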
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSlice = pointerCoderFuncs{
+ size: sizeBoolPackedSlice,
+ marshal: appendBoolPackedSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
+// sizeBoolValue returns the size of wire encoding a bool value as a Bool.
+func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+}
+
+// appendBoolValue encodes a bool value as a Bool.
+func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ return b, nil
+}
+
+// consumeBoolValue decodes a bool value as a Bool.
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil
+}
+
+var coderBoolValue = valueCoderFuncs{
+ size: sizeBoolValue,
+ marshal: appendBoolValue,
+ unmarshal: consumeBoolValue,
+ merge: mergeScalarValue,
+}
+
+// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool.
+func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return size
+}
+
+// appendBoolSliceValue encodes a []bool value as a repeated Bool.
+func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool.
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBoolSliceValue = valueCoderFuncs{
+ size: sizeBoolSliceValue,
+ marshal: appendBoolSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool.
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool.
+func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSliceValue = valueCoderFuncs{
+ size: sizeBoolPackedSliceValue,
+ marshal: appendBoolPackedSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeEnumValue returns the size of wire encoding a value as an Enum.
+func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Enum()))
+}
+
+// appendEnumValue encodes a value as an Enum.
+func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ return b, nil
+}
+
+// consumeEnumValue decodes a value as an Enum.
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil
+}
+
+var coderEnumValue = valueCoderFuncs{
+ size: sizeEnumValue,
+ marshal: appendEnumValue,
+ unmarshal: consumeEnumValue,
+ merge: mergeScalarValue,
+}
+
+// sizeEnumSliceValue returns the size of wire encoding a list of enum values as a repeated Enum.
+func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return size
+}
+
+// appendEnumSliceValue encodes a list of enum values as a repeated Enum.
+func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+// consumeEnumSliceValue wire decodes a list of enum values as a repeated Enum.
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderEnumSliceValue = valueCoderFuncs{
+ size: sizeEnumSliceValue,
+ marshal: appendEnumSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeEnumPackedSliceValue returns the size of wire encoding a list of enum values as a packed repeated Enum.
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendEnumPackedSliceValue encodes a list of enum values as a packed repeated Enum.
+func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSliceValue = valueCoderFuncs{
+ size: sizeEnumPackedSliceValue,
+ marshal: appendEnumPackedSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32 returns the size of wire encoding an int32 pointer as an Int32.
+func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32 wire encodes an int32 pointer as an Int32.
+func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32 wire decodes an int32 pointer as an Int32.
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32 = pointerCoderFuncs{
+ size: sizeInt32,
+ marshal: appendInt32,
+ unmarshal: consumeInt32,
+ merge: mergeInt32,
+}
+
+// sizeInt32NoZero returns the size of wire encoding an int32 pointer as an Int32.
+// The zero value is not encoded.
+func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32NoZero wire encodes an int32 pointer as an Int32.
+// The zero value is not encoded.
+func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt32NoZero = pointerCoderFuncs{
+ size: sizeInt32NoZero,
+ marshal: appendInt32NoZero,
+ unmarshal: consumeInt32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as an Int32.
+// It panics if the pointer is nil.
+func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32Ptr wire encodes a *int32 pointer as an Int32.
+// It panics if the pointer is nil.
+func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32Ptr wire decodes a *int32 pointer as an Int32.
+func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Ptr = pointerCoderFuncs{
+ size: sizeInt32Ptr,
+ marshal: appendInt32Ptr,
+ unmarshal: consumeInt32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32.
+func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt32Slice encodes a []int32 pointer as a repeated Int32.
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32.
+func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Slice = pointerCoderFuncs{
+ size: sizeInt32Slice,
+ marshal: appendInt32Slice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32.
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32.
+func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSlice = pointerCoderFuncs{
+ size: sizeInt32PackedSlice,
+ marshal: appendInt32PackedSlice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32Value returns the size of wire encoding an int32 value as an Int32.
+func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+}
+
+// appendInt32Value encodes an int32 value as an Int32.
+func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ return b, nil
+}
+
+// consumeInt32Value decodes an int32 value as an Int32.
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderInt32Value = valueCoderFuncs{
+ size: sizeInt32Value,
+ marshal: appendInt32Value,
+ unmarshal: consumeInt32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32.
+func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return size
+}
+
+// appendInt32SliceValue encodes a []int32 value as a repeated Int32.
+func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32.
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt32SliceValue = valueCoderFuncs{
+ size: sizeInt32SliceValue,
+ marshal: appendInt32SliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32.
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32.
+func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSliceValue = valueCoderFuncs{
+ size: sizeInt32PackedSliceValue,
+ marshal: appendInt32PackedSliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32 returns the size of wire encoding an int32 pointer as a Sint32.
+func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32 wire encodes an int32 pointer as a Sint32.
+func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32 wire decodes an int32 pointer as a Sint32.
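+// The decoded varint is masked to 32 bits before zigzag decoding, matching
+// the sint32 wire representation.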
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32 = pointerCoderFuncs{
+ size: sizeSint32,
+ marshal: appendSint32,
+ unmarshal: consumeSint32,
+ merge: mergeInt32,
+}
+
+// sizeSint32NoZero returns the size of wire encoding an int32 pointer as a Sint32.
+// The zero value is not encoded.
+func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32NoZero wire encodes an int32 pointer as a Sint32.
+// The zero value is not encoded.
+func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+var coderSint32NoZero = pointerCoderFuncs{
+ size: sizeSint32NoZero,
+ marshal: appendSint32NoZero,
+ unmarshal: consumeSint32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32Ptr wire encodes a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32.
+func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Ptr = pointerCoderFuncs{
+ size: sizeSint32Ptr,
+ marshal: appendSint32Ptr,
+ unmarshal: consumeSint32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32.
+func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return size
+}
+
+// appendSint32Slice encodes a []int32 pointer as a repeated Sint32.
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32.
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Slice = pointerCoderFuncs{
+ size: sizeSint32Slice,
+ marshal: appendSint32Slice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32.
+func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32.
+func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSlice = pointerCoderFuncs{
+ size: sizeSint32PackedSlice,
+ marshal: appendSint32PackedSlice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32Value returns the size of wire encoding an int32 value as a Sint32.
+func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+}
+
+// appendSint32Value encodes an int32 value as a Sint32.
+func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ return b, nil
+}
+
+// consumeSint32Value decodes an int32 value as a Sint32.
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil
+}
+
+var coderSint32Value = valueCoderFuncs{
+ size: sizeSint32Value,
+ marshal: appendSint32Value,
+ unmarshal: consumeSint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32.
+func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return size
+}
+
+// appendSint32SliceValue encodes a []int32 value as a repeated Sint32.
+func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32.
+func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint32SliceValue = valueCoderFuncs{
+ size: sizeSint32SliceValue,
+ marshal: appendSint32SliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32.
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32.
+func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSliceValue = valueCoderFuncs{
+ size: sizeSint32PackedSliceValue,
+ marshal: appendSint32PackedSliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32.
+func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32 wire encodes a uint32 pointer as a Uint32.
+func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32 wire decodes a uint32 pointer as a Uint32.
+func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint32() = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32 = pointerCoderFuncs{
+ size: sizeUint32,
+ marshal: appendUint32,
+ unmarshal: consumeUint32,
+ merge: mergeUint32,
+}
+
+// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32NoZero wire encodes a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderUint32NoZero = pointerCoderFuncs{
+ size: sizeUint32NoZero,
+ marshal: appendUint32NoZero,
+ unmarshal: consumeUint32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Uint32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32.
+func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Ptr = pointerCoderFuncs{
+ size: sizeUint32Ptr,
+ marshal: appendUint32Ptr,
+ unmarshal: consumeUint32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32.
+func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32.
+func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32.
+func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
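+	// A length-delimited (BytesType) payload carries the packed form of the
+	// repeated field; both packed and unpacked encodings are accepted on decode.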
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, uint32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, uint32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Slice = pointerCoderFuncs{
+ size: sizeUint32Slice,
+ marshal: appendUint32Slice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32.
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32.
+func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
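+	// Compute the packed payload length first so it can be written as the
+	// length prefix before the elements themselves.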
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSlice = pointerCoderFuncs{
+ size: sizeUint32PackedSlice,
+ marshal: appendUint32PackedSlice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
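+// The *Value coder functions operate on protoreflect.Value rather than typed
+// struct pointers; they serve fields reached through the reflection path,
+// such as extension fields.
+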
+// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32.
+func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+}
+
+// appendUint32Value encodes a uint32 value as a Uint32.
+func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ return b, nil
+}
+
+// consumeUint32Value decodes a uint32 value as a Uint32.
+func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderUint32Value = valueCoderFuncs{
+ size: sizeUint32Value,
+ marshal: appendUint32Value,
+ unmarshal: consumeUint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32.
+func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return size
+}
+
+// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32.
+func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32.
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint32SliceValue = valueCoderFuncs{
+ size: sizeUint32SliceValue,
+ marshal: appendUint32SliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32.
+func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32.
+func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSliceValue = valueCoderFuncs{
+ size: sizeUint32PackedSliceValue,
+ marshal: appendUint32PackedSliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64 returns the size of wire encoding an int64 pointer as an Int64.
+func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64 wire encodes an int64 pointer as an Int64.
+func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64 wire decodes an int64 pointer as an Int64.
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64 = pointerCoderFuncs{
+ size: sizeInt64,
+ marshal: appendInt64,
+ unmarshal: consumeInt64,
+ merge: mergeInt64,
+}
+
+// sizeInt64NoZero returns the size of wire encoding an int64 pointer as an Int64.
+// The zero value is not encoded.
+func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64NoZero wire encodes an int64 pointer as an Int64.
+// The zero value is not encoded.
+func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt64NoZero = pointerCoderFuncs{
+ size: sizeInt64NoZero,
+ marshal: appendInt64NoZero,
+ unmarshal: consumeInt64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as an Int64.
+// It panics if the pointer is nil.
+func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64Ptr wire encodes a *int64 pointer as an Int64.
+// It panics if the pointer is nil.
+func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64Ptr wire decodes a *int64 pointer as an Int64.
+func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Ptr = pointerCoderFuncs{
+ size: sizeInt64Ptr,
+ marshal: appendInt64Ptr,
+ unmarshal: consumeInt64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64.
+func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt64Slice encodes a []int64 pointer as a repeated Int64.
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64.
+func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Slice = pointerCoderFuncs{
+ size: sizeInt64Slice,
+ marshal: appendInt64Slice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64.
+func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64.
+func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSlice = pointerCoderFuncs{
+ size: sizeInt64PackedSlice,
+ marshal: appendInt64PackedSlice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64Value returns the size of wire encoding an int64 value as an Int64.
+func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Int()))
+}
+
+// appendInt64Value encodes an int64 value as an Int64.
+func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeInt64Value decodes an int64 value as an Int64.
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderInt64Value = valueCoderFuncs{
+ size: sizeInt64Value,
+ marshal: appendInt64Value,
+ unmarshal: consumeInt64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64.
+func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Int()))
+ }
+ return size
+}
+
+// appendInt64SliceValue encodes a []int64 value as a repeated Int64.
+func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64.
+func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt64SliceValue = valueCoderFuncs{
+ size: sizeInt64SliceValue,
+ marshal: appendInt64SliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64.
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64.
+func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSliceValue = valueCoderFuncs{
+ size: sizeInt64PackedSliceValue,
+ marshal: appendInt64PackedSliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64 returns the size of wire encoding an int64 pointer as a Sint64.
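+// Sint64 uses zigzag encoding: protowire.EncodeZigZag maps signed values to
+// unsigned varints so that small negative numbers stay small on the wire.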
+func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64 wire encodes an int64 pointer as a Sint64.
+func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64 wire decodes an int64 pointer as a Sint64.
+func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64 = pointerCoderFuncs{
+ size: sizeSint64,
+ marshal: appendSint64,
+ unmarshal: consumeSint64,
+ merge: mergeInt64,
+}
+
+// sizeSint64NoZero returns the size of wire encoding an int64 pointer as a Sint64.
+// The zero value is not encoded.
+func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64NoZero wire encodes an int64 pointer as a Sint64.
+// The zero value is not encoded.
+func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+var coderSint64NoZero = pointerCoderFuncs{
+ size: sizeSint64NoZero,
+ marshal: appendSint64NoZero,
+ unmarshal: consumeSint64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64Ptr wire encodes a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64.
+func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Ptr = pointerCoderFuncs{
+ size: sizeSint64Ptr,
+ marshal: appendSint64Ptr,
+ unmarshal: consumeSint64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64.
+func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return size
+}
+
+// appendSint64Slice encodes a []int64 pointer as a repeated Sint64.
+func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64.
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, protowire.DecodeZigZag(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, protowire.DecodeZigZag(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Slice = pointerCoderFuncs{
+ size: sizeSint64Slice,
+ marshal: appendSint64Slice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64.
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64.
+func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSlice = pointerCoderFuncs{
+ size: sizeSint64PackedSlice,
+ marshal: appendSint64PackedSlice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64Value returns the size of wire encoding an int64 value as a Sint64.
+func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+}
+
+// appendSint64Value encodes an int64 value as a Sint64.
+func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ return b, nil
+}
+
+// consumeSint64Value decodes an int64 value as a Sint64.
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil
+}
+
+var coderSint64Value = valueCoderFuncs{
+ size: sizeSint64Value,
+ marshal: appendSint64Value,
+ unmarshal: consumeSint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64.
+func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return size
+}
+
+// appendSint64SliceValue encodes a []int64 value as a repeated Sint64.
+func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64.
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint64SliceValue = valueCoderFuncs{
+ size: sizeSint64SliceValue,
+ marshal: appendSint64SliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64.
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64.
+func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSliceValue = valueCoderFuncs{
+ size: sizeSint64PackedSliceValue,
+ marshal: appendSint64PackedSliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64.
+func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64 wire encodes a uint64 pointer as a Uint64.
+func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64 wire decodes a uint64 pointer as a Uint64.
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64 = pointerCoderFuncs{
+ size: sizeUint64,
+ marshal: appendUint64,
+ unmarshal: consumeUint64,
+ merge: mergeUint64,
+}
+
+// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64NoZero wire encodes a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+var coderUint64NoZero = pointerCoderFuncs{
+ size: sizeUint64NoZero,
+ marshal: appendUint64NoZero,
+ unmarshal: consumeUint64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Uint64Ptr()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64.
+func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Ptr = pointerCoderFuncs{
+ size: sizeUint64Ptr,
+ marshal: appendUint64Ptr,
+ unmarshal: consumeUint64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64.
+func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(v)
+ }
+ return size
+}
+
+// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64.
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64.
+func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Slice = pointerCoderFuncs{
+ size: sizeUint64Slice,
+ marshal: appendUint64Slice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64.
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64.
+func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+var coderUint64PackedSlice = pointerCoderFuncs{
+ size: sizeUint64PackedSlice,
+ marshal: appendUint64PackedSlice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64.
+func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(v.Uint())
+}
+
+// appendUint64Value encodes a uint64 value as a Uint64.
+func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ return b, nil
+}
+
+// consumeUint64Value decodes a uint64 value as a Uint64.
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderUint64Value = valueCoderFuncs{
+ size: sizeUint64Value,
+ marshal: appendUint64Value,
+ unmarshal: consumeUint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64.
+func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(v.Uint())
+ }
+ return size
+}
+
+// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64.
+func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64.
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint64SliceValue = valueCoderFuncs{
+ size: sizeUint64SliceValue,
+ marshal: appendUint64SliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64.
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64.
+func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderUint64PackedSliceValue = valueCoderFuncs{
+ size: sizeUint64PackedSliceValue,
+ marshal: appendUint64PackedSliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
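+// Sfixed32 and Fixed32 values always occupy four bytes on the wire, so their
+// size helpers never need to read the field value.
+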
+// sizeSfixed32 returns the size of wire encoding an int32 pointer as a Sfixed32.
+func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+	return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32 wire encodes an int32 pointer as a Sfixed32.
+func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32 wire decodes an int32 pointer as a Sfixed32.
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32 = pointerCoderFuncs{
+ size: sizeSfixed32,
+ marshal: appendSfixed32,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32,
+}
+
+// sizeSfixed32NoZero returns the size of wire encoding an int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32NoZero wire encodes an int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+var coderSfixed32NoZero = pointerCoderFuncs{
+ size: sizeSfixed32NoZero,
+ marshal: appendSfixed32NoZero,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32.
+func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Ptr = pointerCoderFuncs{
+ size: sizeSfixed32Ptr,
+ marshal: appendSfixed32Ptr,
+ unmarshal: consumeSfixed32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32.
+func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32.
+func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32.
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Slice = pointerCoderFuncs{
+ size: sizeSfixed32Slice,
+ marshal: appendSfixed32Slice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32.
+func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32.
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed32PackedSlice,
+ marshal: appendSfixed32PackedSlice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32Value returns the size of wire encoding an int32 value as a Sfixed32.
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Value encodes an int32 value as a Sfixed32.
+func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed32Value decodes an int32 value as a Sfixed32.
+func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderSfixed32Value = valueCoderFuncs{
+ size: sizeSfixed32Value,
+ marshal: appendSfixed32Value,
+ unmarshal: consumeSfixed32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32.
+func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32.
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32.
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed32SliceValue = valueCoderFuncs{
+ size: sizeSfixed32SliceValue,
+ marshal: appendSfixed32SliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32.
+func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32.
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed32PackedSliceValue,
+ marshal: appendSfixed32PackedSliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32.
+func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+	return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32 wire encodes a uint32 pointer as a Fixed32.
+func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32 wire decodes a uint32 pointer as a Fixed32.
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint32() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32 = pointerCoderFuncs{
+ size: sizeFixed32,
+ marshal: appendFixed32,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32,
+}
+
+// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+var coderFixed32NoZero = pointerCoderFuncs{
+ size: sizeFixed32NoZero,
+ marshal: appendFixed32NoZero,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32.
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Ptr = pointerCoderFuncs{
+ size: sizeFixed32Ptr,
+ marshal: appendFixed32Ptr,
+ unmarshal: consumeFixed32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32.
+func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32.
+func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32.
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Slice = pointerCoderFuncs{
+ size: sizeFixed32Slice,
+ marshal: appendFixed32Slice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
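consumeFixed32Slice accepts both encodings of a repeated fixed32 field: a packed, length-delimited record and individual Fixed32 records, matching the protobuf rule that parsers must accept either form. A sketch of the same tolerance written against the public protowire package; the helper name and field number 1 below are illustrative, not part of the patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// readRepeatedFixed32 mirrors the shape of consumeFixed32Slice: a BytesType
// record is unpacked element by element, a Fixed32Type record adds one value.
func readRepeatedFixed32(b []byte) (s []uint32, err error) {
	for len(b) > 0 {
		_, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		b = b[n:]
		switch typ {
		case protowire.BytesType: // packed
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return nil, protowire.ParseError(n)
			}
			for len(v) > 0 {
				e, m := protowire.ConsumeFixed32(v)
				if m < 0 {
					return nil, protowire.ParseError(m)
				}
				s = append(s, e)
				v = v[m:]
			}
			b = b[n:]
		case protowire.Fixed32Type: // unpacked
			e, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return nil, protowire.ParseError(n)
			}
			s = append(s, e)
			b = b[n:]
		default:
			return nil, fmt.Errorf("unexpected wire type %v", typ)
		}
	}
	return s, nil
}

func main() {
	var unpacked []byte
	for _, v := range []uint32{1, 2, 3} {
		unpacked = protowire.AppendTag(unpacked, 1, protowire.Fixed32Type)
		unpacked = protowire.AppendFixed32(unpacked, v)
	}
	packed := protowire.AppendTag(nil, 1, protowire.BytesType)
	packed = protowire.AppendVarint(packed, uint64(3*protowire.SizeFixed32()))
	for _, v := range []uint32{1, 2, 3} {
		packed = protowire.AppendFixed32(packed, v)
	}
	u, _ := readRepeatedFixed32(unpacked)
	p, _ := readRepeatedFixed32(packed)
	fmt.Println(u, p) // [1 2 3] [1 2 3]
}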
+// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32.
+func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32.
+func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSlice = pointerCoderFuncs{
+ size: sizeFixed32PackedSlice,
+ marshal: appendFixed32PackedSlice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
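The packed size computed by sizeFixed32PackedSlice is f.tagsize (the varint-encoded tag) plus one length-delimited payload of four bytes per element. Worked out for three elements of an illustrative field 1, packing saves one tag per element after the first:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const elems = 3
	// Packed: one tag + varint length + 4 bytes per element (sizeFixed32PackedSlice).
	packed := protowire.SizeTag(1) + protowire.SizeBytes(elems*protowire.SizeFixed32())
	// Unpacked: one tag + 4 bytes for every element (sizeFixed32Slice).
	unpacked := elems * (protowire.SizeTag(1) + protowire.SizeFixed32())
	fmt.Println(packed, unpacked) // 14 15
}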
+// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32.
+func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Value encodes a uint32 value as a Fixed32.
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ return b, nil
+}
+
+// consumeFixed32Value decodes a uint32 value as a Fixed32.
+func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderFixed32Value = valueCoderFuncs{
+ size: sizeFixed32Value,
+ marshal: appendFixed32Value,
+ unmarshal: consumeFixed32Value,
+ merge: mergeScalarValue,
+}
+
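The *Value variants carry the field through protoreflect.Value instead of a typed pointer; appendFixed32Value narrows it back down with v.Uint(). A small sketch of that round trip:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOfUint32(0xdeadbeef)
	// Value.Uint() returns a uint64, so the coder converts back to uint32
	// before handing the bits to protowire.AppendFixed32.
	fmt.Printf("%#x\n", uint32(v.Uint())) // 0xdeadbeef
}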
+// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32.
+func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32.
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32.
+func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed32SliceValue = valueCoderFuncs{
+ size: sizeFixed32SliceValue,
+ marshal: appendFixed32SliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32.
+func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32.
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed32PackedSliceValue,
+ marshal: appendFixed32PackedSliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloat returns the size of wire encoding a float32 pointer as a Float.
+func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloat wire encodes a float32 pointer as a Float.
+func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloat wire decodes a float32 pointer as a Float.
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Float32() = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloat = pointerCoderFuncs{
+ size: sizeFloat,
+ marshal: appendFloat,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32,
+}
+
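Floats reuse the fixed32 machinery: appendFloat stores the raw IEEE-754 bit pattern via math.Float32bits and consumeFloat reverses it with math.Float32frombits. A two-line illustration of that bit round trip:

package main

import (
	"fmt"
	"math"
)

func main() {
	bits := math.Float32bits(3.25)
	fmt.Printf("%#x %v\n", bits, math.Float32frombits(bits)) // 0x40500000 3.25
}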
+// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float.
+// The zero value is not encoded.
+func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Float32()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatNoZero wire encodes a float32 pointer as a Float.
+// The zero value is not encoded.
+func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+var coderFloatNoZero = pointerCoderFuncs{
+ size: sizeFloatNoZero,
+ marshal: appendFloatNoZero,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32NoZero,
+}
+
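The NoZero float variants are careful about negative zero: +0 is skipped, but -0 compares equal to zero while still having its sign bit set, so the math.Signbit check keeps it on the wire. A short demonstration of why the extra check is needed:

package main

import (
	"fmt"
	"math"
)

func main() {
	negZero := float32(math.Copysign(0, -1))
	fmt.Println(negZero == 0)                   // true: a plain zero check would drop it
	fmt.Println(math.Signbit(float64(negZero))) // true: so the coder still encodes it
}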
+// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatPtr wire encodes a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Float32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloatPtr wire decodes a *float32 pointer as a Float.
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Float32Ptr()
+ if *vp == nil {
+ *vp = new(float32)
+ }
+ **vp = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloatPtr = pointerCoderFuncs{
+ size: sizeFloatPtr,
+ marshal: appendFloatPtr,
+ unmarshal: consumeFloatPtr,
+ merge: mergeFloat32Ptr,
+}
+
+// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float.
+func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSlice encodes a []float32 pointer as a repeated Float.
+func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float.
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, math.Float32frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, math.Float32frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderFloatSlice = pointerCoderFuncs{
+ size: sizeFloatSlice,
+ marshal: appendFloatSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float.
+func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float.
+func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSlice = pointerCoderFuncs{
+ size: sizeFloatPackedSlice,
+ marshal: appendFloatPackedSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatValue returns the size of wire encoding a float32 value as a Float.
+func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatValue encodes a float32 value as a Float.
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ return b, nil
+}
+
+// consumeFloatValue decodes a float32 value as a Float.
+func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil
+}
+
+var coderFloatValue = valueCoderFuncs{
+ size: sizeFloatValue,
+ marshal: appendFloatValue,
+ unmarshal: consumeFloatValue,
+ merge: mergeScalarValue,
+}
+
+// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float.
+func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSliceValue encodes a []float32 value as a repeated Float.
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float.
+func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFloatSliceValue = valueCoderFuncs{
+ size: sizeFloatSliceValue,
+ marshal: appendFloatSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float.
+func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float.
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSliceValue = valueCoderFuncs{
+ size: sizeFloatPackedSliceValue,
+ marshal: appendFloatPackedSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64 returns the size of wire encoding an int64 pointer as a Sfixed64.
+func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64 wire encodes an int64 pointer as a Sfixed64.
+func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64 wire decodes an int64 pointer as a Sfixed64.
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64 = pointerCoderFuncs{
+ size: sizeSfixed64,
+ marshal: appendSfixed64,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64,
+}
+
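sfixed64 values travel as their two's-complement bit pattern: appendSfixed64 converts the int64 to uint64 for the eight little-endian bytes and consumeSfixed64 converts back, so negative values round-trip exactly. A minimal sketch with the public protowire package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendFixed64(nil, uint64(int64(-2)))
	fmt.Printf("% x\n", b) // fe ff ff ff ff ff ff ff
	v, _ := protowire.ConsumeFixed64(b)
	fmt.Println(int64(v)) // -2
}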
+// sizeSfixed64NoZero returns the size of wire encoding an int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64NoZero wire encodes an int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+var coderSfixed64NoZero = pointerCoderFuncs{
+ size: sizeSfixed64NoZero,
+ marshal: appendSfixed64NoZero,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64.
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Ptr = pointerCoderFuncs{
+ size: sizeSfixed64Ptr,
+ marshal: appendSfixed64Ptr,
+ unmarshal: consumeSfixed64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64.
+func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64.
+func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64.
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Slice = pointerCoderFuncs{
+ size: sizeSfixed64Slice,
+ marshal: appendSfixed64Slice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64.
+func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64.
+func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed64PackedSlice,
+ marshal: appendSfixed64PackedSlice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSfixed64Value returns the size of wire encoding an int64 value as a Sfixed64.
+func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Value encodes an int64 value as a Sfixed64.
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed64Value decodes an int64 value as a Sfixed64.
+func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderSfixed64Value = valueCoderFuncs{
+ size: sizeSfixed64Value,
+ marshal: appendSfixed64Value,
+ unmarshal: consumeSfixed64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64.
+func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64.
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64.
+func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed64SliceValue = valueCoderFuncs{
+ size: sizeSfixed64SliceValue,
+ marshal: appendSfixed64SliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64.
+func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64.
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed64PackedSliceValue,
+ marshal: appendSfixed64PackedSliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64.
+func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64 wire encodes a uint64 pointer as a Fixed64.
+func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64 wire decodes a uint64 pointer as a Fixed64.
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64 = pointerCoderFuncs{
+ size: sizeFixed64,
+ marshal: appendFixed64,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64,
+}
+
+// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+var coderFixed64NoZero = pointerCoderFuncs{
+ size: sizeFixed64NoZero,
+ marshal: appendFixed64NoZero,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64.
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Ptr = pointerCoderFuncs{
+ size: sizeFixed64Ptr,
+ marshal: appendFixed64Ptr,
+ unmarshal: consumeFixed64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64.
+func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64.
+func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64.
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Slice = pointerCoderFuncs{
+ size: sizeFixed64Slice,
+ marshal: appendFixed64Slice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64.
+func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64.
+func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSlice = pointerCoderFuncs{
+ size: sizeFixed64PackedSlice,
+ marshal: appendFixed64PackedSlice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64.
+func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Value encodes a uint64 value as a Fixed64.
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ return b, nil
+}
+
+// consumeFixed64Value decodes a uint64 value as a Fixed64.
+func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderFixed64Value = valueCoderFuncs{
+ size: sizeFixed64Value,
+ marshal: appendFixed64Value,
+ unmarshal: consumeFixed64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64.
+func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64.
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64.
+func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed64SliceValue = valueCoderFuncs{
+ size: sizeFixed64SliceValue,
+ marshal: appendFixed64SliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64.
+func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64.
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed64PackedSliceValue,
+ marshal: appendFixed64PackedSliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDouble returns the size of wire encoding a float64 pointer as a Double.
+func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDouble wire encodes a float64 pointer as a Double.
+func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDouble wire decodes a float64 pointer as a Double.
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Float64() = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDouble = pointerCoderFuncs{
+ size: sizeDouble,
+ marshal: appendDouble,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64,
+}
+
+// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double.
+// The zero value is not encoded.
+func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleNoZero wire encodes a float64 pointer as a Double.
+// The zero value is not encoded.
+func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+var coderDoubleNoZero = pointerCoderFuncs{
+ size: sizeDoubleNoZero,
+ marshal: appendDoubleNoZero,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64NoZero,
+}
+
+// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoublePtr wire encodes a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Float64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDoublePtr wire decodes a *float64 pointer as a Double.
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Float64Ptr()
+ if *vp == nil {
+ *vp = new(float64)
+ }
+ **vp = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDoublePtr = pointerCoderFuncs{
+ size: sizeDoublePtr,
+ marshal: appendDoublePtr,
+ unmarshal: consumeDoublePtr,
+ merge: mergeFloat64Ptr,
+}
+
+// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double.
+func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSlice encodes a []float64 pointer as a repeated Double.
+func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double.
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, math.Float64frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, math.Float64frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderDoubleSlice = pointerCoderFuncs{
+ size: sizeDoubleSlice,
+ marshal: appendDoubleSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double.
+func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double.
+func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSlice = pointerCoderFuncs{
+ size: sizeDoublePackedSlice,
+ marshal: appendDoublePackedSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoubleValue returns the size of wire encoding a float64 value as a Double.
+func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleValue encodes a float64 value as a Double.
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ return b, nil
+}
+
+// consumeDoubleValue decodes a float64 value as a Double.
+func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil
+}
+
+var coderDoubleValue = valueCoderFuncs{
+ size: sizeDoubleValue,
+ marshal: appendDoubleValue,
+ unmarshal: consumeDoubleValue,
+ merge: mergeScalarValue,
+}
+
+// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double.
+func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSliceValue encodes a []float64 value as a repeated Double.
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double.
+func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderDoubleSliceValue = valueCoderFuncs{
+ size: sizeDoubleSliceValue,
+ marshal: appendDoubleSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double.
+func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double.
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSliceValue = valueCoderFuncs{
+ size: sizeDoublePackedSliceValue,
+ marshal: appendDoublePackedSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeString returns the size of wire encoding a string pointer as a String.
+func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.String()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendString wire encodes a string pointer as a String.
+func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeString wire decodes a string pointer as a String.
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.String() = v
+ out.n = n
+ return out, nil
+}
+
+var coderString = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendString,
+ unmarshal: consumeString,
+ merge: mergeString,
+}
+
+// appendStringValidateUTF8 wire encodes a string pointer as a String.
+func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValidateUTF8 wire decodes a string pointer as a String.
+func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.String() = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringValidateUTF8 = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendStringValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeString,
+}
+
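The ValidateUTF8 string coders append or accept the bytes first and only then report the violation, so the data still round-trips while the error (the package-internal errInvalidUTF8 sentinel) surfaces to the caller. A sketch of just that check, with a stand-in error value since the real sentinel is unexported:

package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

// errInvalidUTF8 stands in for the unexported sentinel used by the coders.
var errInvalidUTF8 = errors.New("string field contains invalid UTF-8")

// checkUTF8 mirrors the test appendStringValidateUTF8 runs after writing the bytes.
func checkUTF8(s string) error {
	if !utf8.ValidString(s) {
		return errInvalidUTF8
	}
	return nil
}

func main() {
	fmt.Println(checkUTF8("héllo"))    // <nil>
	fmt.Println(checkUTF8("\xff\xfe")) // string field contains invalid UTF-8
}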
+// sizeStringNoZero returns the size of wire encoding a string pointer as a String.
+// The zero value is not encoded.
+func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.String()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringNoZero wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+var coderStringNoZero = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZero,
+ unmarshal: consumeString,
+ merge: mergeStringNoZero,
+}
+
+// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZeroValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeStringNoZero,
+}
+
+// sizeStringPtr returns the size of wire encoding a *string pointer as a String.
+// It panics if the pointer is nil.
+func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.StringPtr()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringPtr wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeStringPtr wire decodes a *string pointer as a String.
+func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtr = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtr,
+ unmarshal: consumeStringPtr,
+ merge: mergeStringPtr,
+}
+
+// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String.
+func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtrValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtrValidateUTF8,
+ unmarshal: consumeStringPtrValidateUTF8,
+ merge: mergeStringPtr,
+}
+
+// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String.
+func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendStringSlice encodes a []string pointer as a repeated String.
+func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ }
+ return b, nil
+}
+
+// consumeStringSlice wire decodes a []string pointer as a repeated String.
+func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.StringSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringSlice = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSlice,
+ unmarshal: consumeStringSlice,
+ merge: mergeStringSlice,
+}
+
+// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String.
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String.
+func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.StringSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSliceValidateUTF8,
+ unmarshal: consumeStringSliceValidateUTF8,
+ merge: mergeStringSlice,
+}
+
+// sizeStringValue returns the size of wire encoding a string value as a String.
+func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.String()))
+}
+
+// appendStringValue encodes a string value as a String.
+func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ return b, nil
+}
+
+// consumeStringValue decodes a string value as a String.
+func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValue = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValue,
+ unmarshal: consumeStringValue,
+ merge: mergeScalarValue,
+}
+
+// appendStringValueValidateUTF8 encodes a string value as a String.
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ if !utf8.ValidString(v.String()) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValueValidateUTF8 decodes a string value as a String.
+func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return protoreflect.Value{}, out, errInvalidUTF8{}
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValueValidateUTF8 = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValueValidateUTF8,
+ unmarshal: consumeStringValueValidateUTF8,
+ merge: mergeScalarValue,
+}
+
+// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String.
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.String()))
+ }
+ return size
+}
+
+// appendStringSliceValue encodes a []string value as a repeated String.
+func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ }
+ return b, nil
+}
+
+// consumeStringSliceValue wire decodes a []string value as a repeated String.
+func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderStringSliceValue = valueCoderFuncs{
+ size: sizeStringSliceValue,
+ marshal: appendStringSliceValue,
+ unmarshal: consumeStringSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes.
+func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bytes()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytes wire encodes a []byte pointer as a Bytes.
+func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytes wire decodes a []byte pointer as a Bytes.
+func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytes = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytes,
+ unmarshal: consumeBytes,
+ merge: mergeBytes,
+}
+
+// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes.
+func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytesValidateUTF8,
+ unmarshal: consumeBytesValidateUTF8,
+ merge: mergeBytes,
+}
+
+// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytesNoZero wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytesNoZero wire decodes a []byte pointer as a Bytes.
+// The zero value is not decoded.
+func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZero = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZero,
+ unmarshal: consumeBytesNoZero,
+ merge: mergeBytesNoZero,
+}
+
+// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZeroValidateUTF8,
+ unmarshal: consumeBytesNoZeroValidateUTF8,
+ merge: mergeBytesNoZero,
+}
+
+// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes.
+func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ }
+ return b, nil
+}
+
+// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BytesSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSlice = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSlice,
+ unmarshal: consumeBytesSlice,
+ merge: mergeBytesSlice,
+}
+
+// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BytesSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSliceValidateUTF8,
+ unmarshal: consumeBytesSliceValidateUTF8,
+ merge: mergeBytesSlice,
+}
+
+// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes.
+func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.Bytes()))
+}
+
+// appendBytesValue encodes a []byte value as a Bytes.
+func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ return b, nil
+}
+
+// consumeBytesValue decodes a []byte value as a Bytes.
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil
+}
+
+var coderBytesValue = valueCoderFuncs{
+ size: sizeBytesValue,
+ marshal: appendBytesValue,
+ unmarshal: consumeBytesValue,
+ merge: mergeBytesValue,
+}
+
+// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes.
+func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.Bytes()))
+ }
+ return size
+}
+
+// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes.
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes.
+func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBytesSliceValue = valueCoderFuncs{
+ size: sizeBytesSliceValue,
+ marshal: appendBytesSliceValue,
+ unmarshal: consumeBytesSliceValue,
+ merge: mergeBytesListValue,
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
+
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
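For orientation, the wireTypes table above supplies the wire-type half of every tag that the coders in this file emit. A minimal sketch of the resulting layout for a string field (the field number 3, the "abc" payload, and the helper name are illustrative assumptions, not taken from this code):

	// Sketch only; assumes: import "google.golang.org/protobuf/encoding/protowire"
	func encodeStringField() []byte {
		tag := protowire.EncodeTag(3, protowire.BytesType) // (3<<3)|2 = 0x1a
		b := protowire.AppendVarint(nil, tag)
		return protowire.AppendString(b, "abc") // length prefix, then the raw bytes
	}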
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
new file mode 100644
index 000000000..35a67c25b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -0,0 +1,388 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "errors"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapInfo struct {
+ goType reflect.Type
+ keyWiretag uint64
+ valWiretag uint64
+ keyFuncs valueCoderFuncs
+ valFuncs valueCoderFuncs
+ keyZero pref.Value
+ keyKind pref.Kind
+ conv *mapConverter
+}
+
+func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) {
+ // TODO: Consider generating specialized map coders.
+ keyField := fd.MapKey()
+ valField := fd.MapValue()
+ keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()])
+ valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()])
+ keyFuncs := encoderFuncsForValue(keyField)
+ valFuncs := encoderFuncsForValue(valField)
+ conv := newMapConverter(ft, fd)
+
+ mapi := &mapInfo{
+ goType: ft,
+ keyWiretag: keyWiretag,
+ valWiretag: valWiretag,
+ keyFuncs: keyFuncs,
+ valFuncs: valFuncs,
+ keyZero: keyField.Default(),
+ keyKind: keyField.Kind(),
+ conv: conv,
+ }
+ if valField.Kind() == pref.MessageKind {
+ valueMessage = getMessageInfo(ft.Elem())
+ }
+
+ funcs = pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft)
+ if mp.Elem().IsNil() {
+ mp.Elem().Set(reflect.MakeMap(mapi.goType))
+ }
+ if f.mi == nil {
+ return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts)
+ } else {
+ return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts)
+ }
+ },
+ }
+ switch valField.Kind() {
+ case pref.MessageKind:
+ funcs.merge = mergeMapOfMessage
+ case pref.BytesKind:
+ funcs.merge = mergeMapOfBytes
+ default:
+ funcs.merge = mergeMap
+ }
+ if valFuncs.isInit != nil {
+ funcs.isInit = func(p pointer, f *coderFieldInfo) error {
+ return isInitMap(p.AsValueOf(ft).Elem(), mapi, f)
+ }
+ }
+ return valueMessage, funcs
+}
+
+const (
+ mapKeyTagSize = 1 // field 1, tag size 1.
+ mapValTagSize = 1 // field 2, tag size 1.
+)
+
+func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int {
+ if mapv.Len() == 0 {
+ return 0
+ }
+ n := 0
+ iter := mapRange(mapv)
+ for iter.Next() {
+ key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
+ keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ var valSize int
+ value := mapi.conv.valConv.PBValueOf(iter.Value())
+ if f.mi == nil {
+ valSize = mapi.valFuncs.size(value, mapValTagSize, opts)
+ } else {
+ p := pointerOfValue(iter.Value())
+ valSize += mapValTagSize
+ valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts))
+ }
+ n += f.tagsize + protowire.SizeBytes(keySize+valSize)
+ }
+ return n
+}
+
+func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var (
+ key = mapi.keyZero
+ val = mapi.conv.valConv.New()
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case 1:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case 2:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ val = v
+ n = o.n
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val))
+ out.n = n
+ return out, nil
+}
+
+func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var (
+ key = mapi.keyZero
+ val = reflect.New(f.mi.GoReflectType.Elem())
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case 1:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case 2:
+ if wtyp != protowire.BytesType {
+ break
+ }
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var o unmarshalOutput
+ o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts)
+ if o.initialized {
+ // Consider this map item initialized so long as we see
+ // an initialized value.
+ out.initialized = true
+ }
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val)
+ out.n = n
+ return out, nil
+}
+
+func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if f.mi == nil {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := mapi.conv.valConv.PBValueOf(valrv)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapi.valFuncs.size(val, mapValTagSize, opts)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ } else {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := pointerOfValue(valrv)
+ valSize := f.mi.sizePointer(val, opts)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapValTagSize + protowire.SizeBytes(valSize)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ b = protowire.AppendVarint(b, mapi.valWiretag)
+ b = protowire.AppendVarint(b, uint64(valSize))
+ return f.mi.marshalAppendPointer(b, val, opts)
+ }
+}
+
+func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if mapv.Len() == 0 {
+ return b, nil
+ }
+ if opts.Deterministic() {
+ return appendMapDeterministic(b, mapv, mapi, f, opts)
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ keys := mapv.MapKeys()
+ sort.Slice(keys, func(i, j int) bool {
+ switch keys[i].Kind() {
+ case reflect.Bool:
+ return !keys[i].Bool() && keys[j].Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return keys[i].Int() < keys[j].Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return keys[i].Uint() < keys[j].Uint()
+ case reflect.Float32, reflect.Float64:
+ return keys[i].Float() < keys[j].Float()
+ case reflect.String:
+ return keys[i].String() < keys[j].String()
+ default:
+ panic("invalid kind: " + keys[i].Kind().String())
+ }
+ })
+ for _, key := range keys {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
+ if mi := f.mi; mi != nil {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := pointerOfValue(iter.Value())
+ if err := mi.checkInitializedPointer(val); err != nil {
+ return err
+ }
+ }
+ } else {
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := mapi.conv.valConv.PBValueOf(iter.Value())
+ if err := mapi.valFuncs.isInit(val); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), iter.Value())
+ }
+}
+
+func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
+ }
+}
+
+func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ val := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts)
+ } else {
+ opts.Merge(asMessage(val), asMessage(iter.Value()))
+ }
+ dstm.SetMapIndex(iter.Key(), val)
+ }
+}
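As the case 1/case 2 handling above shows, every map entry travels as a small length-delimited pseudo-message whose field 1 is the key and field 2 is the value. A hand-built sketch of one such entry (the outer field number 7, the string key, and the helper name are illustrative assumptions):

	// Sketch only; assumes: import "google.golang.org/protobuf/encoding/protowire"
	func encodeMapEntry() []byte {
		entry := protowire.AppendTag(nil, 1, protowire.BytesType) // key = "a"
		entry = protowire.AppendString(entry, "a")
		entry = protowire.AppendTag(entry, 2, protowire.VarintType) // value = 5
		entry = protowire.AppendVarint(entry, 5)

		b := protowire.AppendTag(nil, 7, protowire.BytesType) // the map field itself
		return protowire.AppendBytes(b, entry)                // length-delimited entry
	}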
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
new file mode 100644
index 000000000..2706bb67f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.12
+
+package impl
+
+import "reflect"
+
+type mapIter struct {
+ v reflect.Value
+ keys []reflect.Value
+}
+
+// mapRange provides a less-efficient equivalent to
+// the Go 1.12 reflect.Value.MapRange method.
+func mapRange(v reflect.Value) *mapIter {
+ return &mapIter{v: v}
+}
+
+func (i *mapIter) Next() bool {
+ if i.keys == nil {
+ i.keys = i.v.MapKeys()
+ } else {
+ i.keys = i.keys[1:]
+ }
+ return len(i.keys) > 0
+}
+
+func (i *mapIter) Key() reflect.Value {
+ return i.keys[0]
+}
+
+func (i *mapIter) Value() reflect.Value {
+ return i.v.MapIndex(i.keys[0])
+}
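Both this fallback iterator and the reflect.MapIter returned by the Go 1.12 variant in the next file are consumed the same way inside the package; a minimal usage sketch (the function name is an illustrative assumption):

	// Sketch only, within this package; assumes: import "reflect"
	func iterateMap(m reflect.Value) {
		for iter := mapRange(m); iter.Next(); {
			_ = iter.Key()   // reflect.Value of the current key
			_ = iter.Value() // reflect.Value of the current value
		}
	}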
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
new file mode 100644
index 000000000..1533ef600
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+
+package impl
+
+import "reflect"
+
+func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
new file mode 100644
index 000000000..0e176d565
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -0,0 +1,159 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/fieldsort"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// coderMessageInfo contains per-message information used by the fast-path functions.
+// This is a different type from MessageInfo to keep MessageInfo as general-purpose as
+// possible.
+type coderMessageInfo struct {
+ methods piface.Methods
+
+ orderedCoderFields []*coderFieldInfo
+ denseCoderFields []*coderFieldInfo
+ coderFields map[protowire.Number]*coderFieldInfo
+ sizecacheOffset offset
+ unknownOffset offset
+ extensionOffset offset
+ needsInitCheck bool
+ isMessageSet bool
+ numRequiredFields uint8
+}
+
+type coderFieldInfo struct {
+ funcs pointerCoderFuncs // fast-path per-field functions
+ mi *MessageInfo // field's message
+ ft reflect.Type
+ validation validationInfo // information used by message validation
+ num pref.FieldNumber // field number
+ offset offset // struct field offset
+ wiretag uint64 // field tag (number + wire type)
+ tagsize int // size of the varint-encoded tag
+ isPointer bool // true if IsNil may be called on the struct field
+ isRequired bool // true if field is required
+}
+
+func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
+ mi.sizecacheOffset = si.sizecacheOffset
+ mi.unknownOffset = si.unknownOffset
+ mi.extensionOffset = si.extensionOffset
+
+ mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+ fields := mi.Desc.Fields()
+ preallocFields := make([]coderFieldInfo, fields.Len())
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ ft := fs.Type
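+ // Packed repeated fields are written as one length-delimited record, so
+ // their tag always carries the bytes wire type; every other field takes
+ // its wire type from the field's kind via the wireTypes table.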
+ var wiretag uint64
+ if !fd.IsPacked() {
+ wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+ }
+ var fieldOffset offset
+ var funcs pointerCoderFuncs
+ var childMessage *MessageInfo
+ switch {
+ case isOneof:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ case fd.IsWeak():
+ fieldOffset = si.weakOffset
+ funcs = makeWeakMessageFieldCoder(fd)
+ default:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ childMessage, funcs = fieldCoder(fd, ft)
+ }
+ cf := &preallocFields[i]
+ *cf = coderFieldInfo{
+ num: fd.Number(),
+ offset: fieldOffset,
+ wiretag: wiretag,
+ ft: ft,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: funcs,
+ mi: childMessage,
+ validation: newFieldValidationInfo(mi, si, fd, ft),
+ isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(),
+ isRequired: fd.Cardinality() == pref.Required,
+ }
+ mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+ mi.coderFields[cf.num] = cf
+ }
+ for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si)
+ }
+ }
+ if messageset.IsMessageSet(mi.Desc) {
+ if !mi.extensionOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+ }
+ if !mi.unknownOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+ }
+ mi.isMessageSet = true
+ }
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+ })
+
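+ // Build a dense lookup table indexed directly by field number for the
+ // common case of small, mostly contiguous numbers; stop as soon as the
+ // numbers turn sparse (>= 16 and more than double the previous maximum),
+ // leaving the remaining fields to the coderFields map.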
+ var maxDense pref.FieldNumber
+ for _, cf := range mi.orderedCoderFields {
+ if cf.num >= 16 && cf.num >= 2*maxDense {
+ break
+ }
+ maxDense = cf.num
+ }
+ mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+ for _, cf := range mi.orderedCoderFields {
+ if int(cf.num) >= len(mi.denseCoderFields) {
+ break
+ }
+ mi.denseCoderFields[cf.num] = cf
+ }
+
+ // To preserve compatibility with historic wire output, marshal oneofs last.
+ if mi.Desc.Oneofs().Len() > 0 {
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+ fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+ return fieldsort.Less(fi, fj)
+ })
+ }
+
+ mi.needsInitCheck = needsInitCheck(mi.Desc)
+ if mi.methods.Marshal == nil && mi.methods.Size == nil {
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Marshal = mi.marshal
+ mi.methods.Size = mi.size
+ }
+ if mi.methods.Unmarshal == nil {
+ mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Unmarshal = mi.unmarshal
+ }
+ if mi.methods.CheckInitialized == nil {
+ mi.methods.CheckInitialized = mi.checkInitialized
+ }
+ if mi.methods.Merge == nil {
+ mi.methods.Merge = mi.merge
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
new file mode 100644
index 000000000..cfb68e12f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -0,0 +1,120 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+)
+
+func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) {
+ if !flags.ProtoLegacy {
+ return 0
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ for _, x := range ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ size += messageset.SizeField(num)
+ size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
+ }
+
+ unknown := *p.Apply(mi.unknownOffset).Bytes()
+ size += messageset.SizeUnknown(unknown)
+
+ return size
+}
+
+func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ switch len(ext) {
+ case 0:
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ for _, x := range ext {
+ var err error
+ b, err = marshalMessageSetField(mi, b, x, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(ext))
+ for k := range ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ var err error
+ b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ }
+
+ unknown := *p.Apply(mi.unknownOffset).Bytes()
+ b, err := messageset.AppendUnknown(b, unknown)
+ if err != nil {
+ return b, err
+ }
+
+ return b, nil
+}
+
+func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) {
+ xi := getExtensionFieldInfo(x.Type())
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ b = messageset.AppendFieldStart(b, num)
+ b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if !flags.ProtoLegacy {
+ return out, errors.New("no support for message_set_wire_format")
+ }
+
+ ep := p.Apply(mi.extensionOffset).Extensions()
+ if *ep == nil {
+ *ep = make(map[int32]ExtensionField)
+ }
+ ext := *ep
+ unknown := p.Apply(mi.unknownOffset).Bytes()
+ initialized := true
+ err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error {
+ o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts)
+ if err == errUnknown {
+ *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType)
+ *unknown = append(*unknown, v...)
+ return nil
+ }
+ if !o.initialized {
+ initialized = false
+ }
+ return err
+ })
+ out.n = len(b)
+ out.initialized = initialized
+ return out, err
+}
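The helpers above read and write the legacy MessageSet item layout. In the classic definition (the field numbers below come from that definition, not from this diff), each extension is wrapped in a group numbered 1 whose field 2 carries the extension's field number and whose field 3 carries the encoded message. A hand-built sketch of one item:

	// Sketch only; assumes: import "google.golang.org/protobuf/encoding/protowire"
	func encodeMessageSetItem(typeID uint64, payload []byte) []byte {
		b := protowire.AppendTag(nil, 1, protowire.StartGroupType) // Item group
		b = protowire.AppendTag(b, 2, protowire.VarintType)        // type_id
		b = protowire.AppendVarint(b, typeID)
		b = protowire.AppendTag(b, 3, protowire.BytesType) // encoded extension message
		b = protowire.AppendBytes(b, payload)
		return protowire.AppendTag(b, 1, protowire.EndGroupType)
	}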
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
new file mode 100644
index 000000000..86f7dc3c9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -0,0 +1,209 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := p.v.Elem().Int()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := p.v.Elem().Int()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ p.v.Elem().SetInt(int64(v))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(src.v.Elem())
+}
+
+var coderEnum = pointerCoderFuncs{
+ size: sizeEnum,
+ marshal: appendEnum,
+ unmarshal: consumeEnum,
+ merge: mergeEnum,
+}
+
+func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ if p.v.Elem().Int() == 0 {
+ return 0
+ }
+ return sizeEnum(p, f, opts)
+}
+
+func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if p.v.Elem().Int() == 0 {
+ return b, nil
+ }
+ return appendEnum(b, p, f, opts)
+}
+
+func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if src.v.Elem().Int() != 0 {
+ dst.v.Elem().Set(src.v.Elem())
+ }
+}
+
+var coderEnumNoZero = pointerCoderFuncs{
+ size: sizeEnumNoZero,
+ marshal: appendEnumNoZero,
+ unmarshal: consumeEnum,
+ merge: mergeEnumNoZero,
+}
+
+func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return sizeEnum(pointer{p.v.Elem()}, f, opts)
+}
+
+func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendEnum(b, pointer{p.v.Elem()}, f, opts)
+}
+
+func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ if p.v.Elem().IsNil() {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
+ }
+ return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
+}
+
+func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if !src.v.Elem().IsNil() {
+ v := reflect.New(dst.v.Type().Elem().Elem())
+ v.Elem().Set(src.v.Elem().Elem())
+ dst.v.Elem().Set(v)
+ }
+}
+
+var coderEnumPtr = pointerCoderFuncs{
+ size: sizeEnumPtr,
+ marshal: appendEnumPtr,
+ unmarshal: consumeEnumPtr,
+ merge: mergeEnumPtr,
+}
+
+func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
+ }
+ return size
+}
+
+func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ s := p.v.Elem()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ b = b[n:]
+ }
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
+}
+
+var coderEnumSlice = pointerCoderFuncs{
+ size: sizeEnumSlice,
+ marshal: appendEnumSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
+
+func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSlice = pointerCoderFuncs{
+ size: sizeEnumPackedSlice,
+ marshal: appendEnumPackedSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
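appendEnumPackedSlice above illustrates the packed form: one tag, one total length, then the bare varints, instead of repeating the tag for every element as appendEnumSlice does. A standalone sketch of the same layout (the field number 4 and the helper name are illustrative assumptions):

	// Sketch only; assumes: import "google.golang.org/protobuf/encoding/protowire"
	func encodePackedVarints(vals []int32) []byte {
		n := 0
		for _, v := range vals {
			n += protowire.SizeVarint(uint64(v))
		}
		b := protowire.AppendTag(nil, 4, protowire.BytesType) // packed fields use the bytes wire type
		b = protowire.AppendVarint(b, uint64(n))              // total payload length
		for _, v := range vals {
			b = protowire.AppendVarint(b, uint64(v))
		}
		return b
	}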
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
new file mode 100644
index 000000000..e89971238
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
@@ -0,0 +1,557 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// pointerCoderFuncs is a set of pointer encoding functions.
+type pointerCoderFuncs struct {
+ mi *MessageInfo
+ size func(p pointer, f *coderFieldInfo, opts marshalOptions) int
+ marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error)
+ isInit func(p pointer, f *coderFieldInfo) error
+ merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions)
+}
+
+// valueCoderFuncs is a set of protoreflect.Value encoding functions.
+type valueCoderFuncs struct {
+ size func(v pref.Value, tagsize int, opts marshalOptions) int
+ marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error)
+ isInit func(v pref.Value) error
+ merge func(dst, src pref.Value, opts mergeOptions) pref.Value
+}
+
+// fieldCoder returns pointer functions for a field, used for operating on
+// struct fields.
+func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ switch {
+ case fd.IsMap():
+ return encoderFuncsForMap(fd, ft)
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ // Repeated fields (not packed).
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Slice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Slice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Slice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Slice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Slice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Slice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Slice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Slice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Slice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Slice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleSlice
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.MessageKind:
+ return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft)
+ case pref.GroupKind:
+ return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft)
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ // Packed repeated fields.
+ //
+ // Only repeated fields of primitive numeric types
+ // (Varint, Fixed32, or Fixed64 wire type) can be packed.
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPackedSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPackedSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32PackedSlice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32PackedSlice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32PackedSlice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64PackedSlice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64PackedSlice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64PackedSlice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32PackedSlice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32PackedSlice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPackedSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64PackedSlice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64PackedSlice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePackedSlice
+ }
+ }
+ case fd.Kind() == pref.MessageKind:
+ return getMessageInfo(ft), makeMessageFieldCoder(fd, ft)
+ case fd.Kind() == pref.GroupKind:
+ return getMessageInfo(ft), makeGroupFieldCoder(fd, ft)
+ case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil:
+ // Proto3 fields outside a oneof are not encoded when set to the zero
+ // value; populated oneof fields always encode, so they are excluded here.
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolNoZero
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumNoZero
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32NoZero
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32NoZero
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32NoZero
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64NoZero
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64NoZero
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64NoZero
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32NoZero
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32NoZero
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatNoZero
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64NoZero
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64NoZero
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleNoZero
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ }
+ case ft.Kind() == reflect.Ptr:
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPtr
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPtr
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Ptr
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Ptr
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Ptr
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Ptr
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Ptr
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Ptr
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Ptr
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Ptr
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPtr
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Ptr
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Ptr
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePtr
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringPtrValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBool
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnum
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloat
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDouble
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft))
+}
+
+// encoderFuncsForValue returns value functions for a field, used for
+// extension values and map encoding.
+func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs {
+ switch {
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolSliceValue
+ case pref.EnumKind:
+ return coderEnumSliceValue
+ case pref.Int32Kind:
+ return coderInt32SliceValue
+ case pref.Sint32Kind:
+ return coderSint32SliceValue
+ case pref.Uint32Kind:
+ return coderUint32SliceValue
+ case pref.Int64Kind:
+ return coderInt64SliceValue
+ case pref.Sint64Kind:
+ return coderSint64SliceValue
+ case pref.Uint64Kind:
+ return coderUint64SliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32SliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32SliceValue
+ case pref.FloatKind:
+ return coderFloatSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64SliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64SliceValue
+ case pref.DoubleKind:
+ return coderDoubleSliceValue
+ case pref.StringKind:
+ // We don't have a UTF-8 validating coder for repeated string fields.
+ // Value coders are used for extensions and maps.
+ // Extensions are never proto3, and maps never contain lists.
+ return coderStringSliceValue
+ case pref.BytesKind:
+ return coderBytesSliceValue
+ case pref.MessageKind:
+ return coderMessageSliceValue
+ case pref.GroupKind:
+ return coderGroupSliceValue
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolPackedSliceValue
+ case pref.EnumKind:
+ return coderEnumPackedSliceValue
+ case pref.Int32Kind:
+ return coderInt32PackedSliceValue
+ case pref.Sint32Kind:
+ return coderSint32PackedSliceValue
+ case pref.Uint32Kind:
+ return coderUint32PackedSliceValue
+ case pref.Int64Kind:
+ return coderInt64PackedSliceValue
+ case pref.Sint64Kind:
+ return coderSint64PackedSliceValue
+ case pref.Uint64Kind:
+ return coderUint64PackedSliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32PackedSliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32PackedSliceValue
+ case pref.FloatKind:
+ return coderFloatPackedSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64PackedSliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64PackedSliceValue
+ case pref.DoubleKind:
+ return coderDoublePackedSliceValue
+ }
+ default:
+ switch fd.Kind() {
+ default:
+ case pref.BoolKind:
+ return coderBoolValue
+ case pref.EnumKind:
+ return coderEnumValue
+ case pref.Int32Kind:
+ return coderInt32Value
+ case pref.Sint32Kind:
+ return coderSint32Value
+ case pref.Uint32Kind:
+ return coderUint32Value
+ case pref.Int64Kind:
+ return coderInt64Value
+ case pref.Sint64Kind:
+ return coderSint64Value
+ case pref.Uint64Kind:
+ return coderUint64Value
+ case pref.Sfixed32Kind:
+ return coderSfixed32Value
+ case pref.Fixed32Kind:
+ return coderFixed32Value
+ case pref.FloatKind:
+ return coderFloatValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64Value
+ case pref.Fixed64Kind:
+ return coderFixed64Value
+ case pref.DoubleKind:
+ return coderDoubleValue
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ return coderStringValueValidateUTF8
+ }
+ return coderStringValue
+ case pref.BytesKind:
+ return coderBytesValue
+ case pref.MessageKind:
+ return coderMessageValue
+ case pref.GroupKind:
+ return coderGroupValue
+ }
+ }
+ panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
new file mode 100644
index 000000000..e118af1e2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+// When using unsafe pointers, we can just treat enum values as int32s.
+
+var (
+ coderEnumNoZero = coderInt32NoZero
+ coderEnum = coderInt32
+ coderEnumPtr = coderInt32Ptr
+ coderEnumSlice = coderInt32Slice
+ coderEnumPackedSlice = coderInt32PackedSlice
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
new file mode 100644
index 000000000..36a90dff3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -0,0 +1,467 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unwrapper unwraps the value to the underlying value.
+// This is implemented by List and Map.
+type unwrapper interface {
+ protoUnwrap() interface{}
+}
+
+// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
+type Converter interface {
+ // PBValueOf converts a reflect.Value to a protoreflect.Value.
+ PBValueOf(reflect.Value) pref.Value
+
+ // GoValueOf converts a protoreflect.Value to a reflect.Value.
+ GoValueOf(pref.Value) reflect.Value
+
+ // IsValidPB returns whether a protoreflect.Value is compatible with this type.
+ IsValidPB(pref.Value) bool
+
+ // IsValidGo returns whether a reflect.Value is compatible with this type.
+ IsValidGo(reflect.Value) bool
+
+ // New returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns a new mutable value.
+ New() pref.Value
+
+ // Zero returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns an immutable, empty value.
+ Zero() pref.Value
+}
+
+// NewConverter matches a Go type with a protobuf field and returns a Converter
+// that converts between the two. Enums must be a named int32 kind that
+// implements protoreflect.Enum, and messages must be a pointer to a named
+// struct type that implements protoreflect.ProtoMessage.
+//
+// This matcher deliberately supports a wider range of Go types than what
+// protoc-gen-go historically generated to be able to automatically wrap some
+// v1 messages generated by other forks of protoc-gen-go.
+func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case fd.IsList():
+ return newListConverter(t, fd)
+ case fd.IsMap():
+ return newMapConverter(t, fd)
+ default:
+ return newSingularConverter(t, fd)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+var (
+ boolType = reflect.TypeOf(bool(false))
+ int32Type = reflect.TypeOf(int32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float32Type = reflect.TypeOf(float32(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf(string(""))
+ bytesType = reflect.TypeOf([]byte(nil))
+ byteType = reflect.TypeOf(byte(0))
+)
+
+var (
+ boolZero = pref.ValueOfBool(false)
+ int32Zero = pref.ValueOfInt32(0)
+ int64Zero = pref.ValueOfInt64(0)
+ uint32Zero = pref.ValueOfUint32(0)
+ uint64Zero = pref.ValueOfUint64(0)
+ float32Zero = pref.ValueOfFloat32(0)
+ float64Zero = pref.ValueOfFloat64(0)
+ stringZero = pref.ValueOfString("")
+ bytesZero = pref.ValueOfBytes(nil)
+)
+
+func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value {
+ if fd.Cardinality() == pref.Repeated {
+ // Default isn't defined for repeated fields.
+ return zero
+ }
+ return fd.Default()
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if t.Kind() == reflect.Bool {
+ return &boolConverter{t, defVal(fd, boolZero)}
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if t.Kind() == reflect.Int32 {
+ return &int32Converter{t, defVal(fd, int32Zero)}
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if t.Kind() == reflect.Int64 {
+ return &int64Converter{t, defVal(fd, int64Zero)}
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if t.Kind() == reflect.Uint32 {
+ return &uint32Converter{t, defVal(fd, uint32Zero)}
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if t.Kind() == reflect.Uint64 {
+ return &uint64Converter{t, defVal(fd, uint64Zero)}
+ }
+ case pref.FloatKind:
+ if t.Kind() == reflect.Float32 {
+ return &float32Converter{t, defVal(fd, float32Zero)}
+ }
+ case pref.DoubleKind:
+ if t.Kind() == reflect.Float64 {
+ return &float64Converter{t, defVal(fd, float64Zero)}
+ }
+ case pref.StringKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &stringConverter{t, defVal(fd, stringZero)}
+ }
+ case pref.BytesKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &bytesConverter{t, defVal(fd, bytesZero)}
+ }
+ case pref.EnumKind:
+ // Handle enums, which must be a named int32 type.
+ if t.Kind() == reflect.Int32 {
+ return newEnumConverter(t, fd)
+ }
+ case pref.MessageKind, pref.GroupKind:
+ return newMessageConverter(t)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type boolConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfBool(v.Bool())
+}
+func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bool()).Convert(c.goType)
+}
+func (c *boolConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(bool)
+ return ok
+}
+func (c *boolConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *boolConverter) New() pref.Value { return c.def }
+func (c *boolConverter) Zero() pref.Value { return c.def }
+
+type int32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt32(int32(v.Int()))
+}
+func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int32(v.Int())).Convert(c.goType)
+}
+func (c *int32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int32)
+ return ok
+}
+func (c *int32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int32Converter) New() pref.Value { return c.def }
+func (c *int32Converter) Zero() pref.Value { return c.def }
+
+type int64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt64(int64(v.Int()))
+}
+func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int64(v.Int())).Convert(c.goType)
+}
+func (c *int64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int64)
+ return ok
+}
+func (c *int64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int64Converter) New() pref.Value { return c.def }
+func (c *int64Converter) Zero() pref.Value { return c.def }
+
+type uint32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint32(uint32(v.Uint()))
+}
+func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType)
+}
+func (c *uint32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint32)
+ return ok
+}
+func (c *uint32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint32Converter) New() pref.Value { return c.def }
+func (c *uint32Converter) Zero() pref.Value { return c.def }
+
+type uint64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint64(uint64(v.Uint()))
+}
+func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType)
+}
+func (c *uint64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint64)
+ return ok
+}
+func (c *uint64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint64Converter) New() pref.Value { return c.def }
+func (c *uint64Converter) Zero() pref.Value { return c.def }
+
+type float32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat32(float32(v.Float()))
+}
+func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float32(v.Float())).Convert(c.goType)
+}
+func (c *float32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float32)
+ return ok
+}
+func (c *float32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float32Converter) New() pref.Value { return c.def }
+func (c *float32Converter) Zero() pref.Value { return c.def }
+
+type float64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat64(float64(v.Float()))
+}
+func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float64(v.Float())).Convert(c.goType)
+}
+func (c *float64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float64)
+ return ok
+}
+func (c *float64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float64Converter) New() pref.Value { return c.def }
+func (c *float64Converter) Zero() pref.Value { return c.def }
+
+type stringConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfString(v.Convert(stringType).String())
+}
+func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value {
+ // pref.Value.String never panics, so we go through an interface
+ // conversion here to check the type.
+ s := v.Interface().(string)
+ if c.goType.Kind() == reflect.Slice && s == "" {
+ return reflect.Zero(c.goType) // ensure empty string is []byte(nil)
+ }
+ return reflect.ValueOf(s).Convert(c.goType)
+}
+func (c *stringConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(string)
+ return ok
+}
+func (c *stringConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *stringConverter) New() pref.Value { return c.def }
+func (c *stringConverter) Zero() pref.Value { return c.def }
+
+type bytesConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if c.goType.Kind() == reflect.String && v.Len() == 0 {
+ return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil)
+ }
+ return pref.ValueOfBytes(v.Convert(bytesType).Bytes())
+}
+func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bytes()).Convert(c.goType)
+}
+func (c *bytesConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().([]byte)
+ return ok
+}
+func (c *bytesConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *bytesConverter) New() pref.Value { return c.def }
+func (c *bytesConverter) Zero() pref.Value { return c.def }
+
+type enumConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter {
+ var def pref.Value
+ if fd.Cardinality() == pref.Repeated {
+ def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+ } else {
+ def = fd.Default()
+ }
+ return &enumConverter{goType, def}
+}
+
+func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfEnum(pref.EnumNumber(v.Int()))
+}
+
+func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Enum()).Convert(c.goType)
+}
+
+func (c *enumConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(pref.EnumNumber)
+ return ok
+}
+
+func (c *enumConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *enumConverter) New() pref.Value {
+ return c.def
+}
+
+func (c *enumConverter) Zero() pref.Value {
+ return c.def
+}
+
+type messageConverter struct {
+ goType reflect.Type
+}
+
+func newMessageConverter(goType reflect.Type) Converter {
+ return &messageConverter{goType}
+}
+
+func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return pref.ValueOfMessage(m.ProtoReflect())
+ }
+ return pref.ValueOfMessage(legacyWrapMessage(v))
+}
+
+func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ if rv.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType))
+ }
+ return rv
+}
+
+func (c *messageConverter) IsValidPB(v pref.Value) bool {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ return rv.Type() == c.goType
+}
+
+func (c *messageConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *messageConverter) New() pref.Value {
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *messageConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
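The converters above all follow the same two-way mapping between reflect.Value and protoreflect.Value. Below is a minimal standalone sketch of that round trip for a bool field, using only the exported protoreflect API; the main package and the literal value are illustrative and not part of the vendored code.

package main

import (
	"fmt"
	"reflect"

	pref "google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// PBValueOf direction: wrap a Go bool in a protoreflect.Value.
	goVal := reflect.ValueOf(true)
	pbVal := pref.ValueOfBool(goVal.Bool())

	// GoValueOf direction: unwrap it back into the original Go type.
	back := reflect.ValueOf(pbVal.Bool()).Convert(goVal.Type())

	fmt.Println(pbVal.Bool(), back.Bool()) // true true
}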
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
new file mode 100644
index 000000000..6fccab520
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice:
+ return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)}
+ case t.Kind() == reflect.Slice:
+ return &listConverter{t, newSingularConverter(t.Elem(), fd)}
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type listConverter struct {
+ goType reflect.Type // []T
+ c Converter
+}
+
+func (c *listConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ pv := reflect.New(c.goType)
+ pv.Elem().Set(v)
+ return pref.ValueOfList(&listReflect{pv, c.c})
+}
+
+func (c *listConverter) GoValueOf(v pref.Value) reflect.Value {
+ rv := v.List().(*listReflect).v
+ if rv.IsNil() {
+ return reflect.Zero(c.goType)
+ }
+ return rv.Elem()
+}
+
+func (c *listConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type().Elem() == c.goType
+}
+
+func (c *listConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listConverter) New() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c})
+}
+
+func (c *listConverter) Zero() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c})
+}
+
+type listPtrConverter struct {
+ goType reflect.Type // *[]T
+ c Converter
+}
+
+func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfList(&listReflect{v, c.c})
+}
+
+func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.List().(*listReflect).v
+}
+
+func (c *listPtrConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type() == c.goType
+}
+
+func (c *listPtrConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listPtrConverter) New() pref.Value {
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *listPtrConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type listReflect struct {
+ v reflect.Value // *[]T
+ conv Converter
+}
+
+func (ls *listReflect) Len() int {
+ if ls.v.IsNil() {
+ return 0
+ }
+ return ls.v.Elem().Len()
+}
+func (ls *listReflect) Get(i int) pref.Value {
+ return ls.conv.PBValueOf(ls.v.Elem().Index(i))
+}
+func (ls *listReflect) Set(i int, v pref.Value) {
+ ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v))
+}
+func (ls *listReflect) Append(v pref.Value) {
+ ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v)))
+}
+func (ls *listReflect) AppendMutable() pref.Value {
+ if _, ok := ls.conv.(*messageConverter); !ok {
+ panic("invalid AppendMutable on list with non-message type")
+ }
+ v := ls.NewElement()
+ ls.Append(v)
+ return v
+}
+func (ls *listReflect) Truncate(i int) {
+ ls.v.Elem().Set(ls.v.Elem().Slice(0, i))
+}
+func (ls *listReflect) NewElement() pref.Value {
+ return ls.conv.New()
+}
+func (ls *listReflect) IsValid() bool {
+ return !ls.v.IsNil()
+}
+func (ls *listReflect) protoUnwrap() interface{} {
+ return ls.v.Interface()
+}
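listReflect stores a *[]T so that mutations made through the protoreflect.List interface are visible to the owning struct field. The following sketch shows that reflection pattern in isolation; the []int32 slice and the appended value are illustrative assumptions.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var s []int32
	pv := reflect.ValueOf(&s) // *[]int32, analogous to listReflect.v

	// Append: grow the slice in place through the pointer.
	pv.Elem().Set(reflect.Append(pv.Elem(), reflect.ValueOf(int32(7))))

	// Len / Get: read back through the same pointer.
	fmt.Println(pv.Elem().Len(), pv.Elem().Index(0).Interface()) // 1 7
}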
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
new file mode 100644
index 000000000..de06b2593
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -0,0 +1,121 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapConverter struct {
+ goType reflect.Type // map[K]V
+ keyConv, valConv Converter
+}
+
+func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter {
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+ }
+ return &mapConverter{
+ goType: t,
+ keyConv: newSingularConverter(t.Key(), fd.MapKey()),
+ valConv: newSingularConverter(t.Elem(), fd.MapValue()),
+ }
+}
+
+func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv})
+}
+
+func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.Map().(*mapReflect).v
+}
+
+func (c *mapConverter) IsValidPB(v pref.Value) bool {
+ mapv, ok := v.Interface().(*mapReflect)
+ if !ok {
+ return false
+ }
+ return mapv.v.Type() == c.goType
+}
+
+func (c *mapConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *mapConverter) New() pref.Value {
+ return c.PBValueOf(reflect.MakeMap(c.goType))
+}
+
+func (c *mapConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type mapReflect struct {
+ v reflect.Value // map[K]V
+ keyConv Converter
+ valConv Converter
+}
+
+func (ms *mapReflect) Len() int {
+ return ms.v.Len()
+}
+func (ms *mapReflect) Has(k pref.MapKey) bool {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ return rv.IsValid()
+}
+func (ms *mapReflect) Get(k pref.MapKey) pref.Value {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ if !rv.IsValid() {
+ return pref.Value{}
+ }
+ return ms.valConv.PBValueOf(rv)
+}
+func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.valConv.GoValueOf(v)
+ ms.v.SetMapIndex(rk, rv)
+}
+func (ms *mapReflect) Clear(k pref.MapKey) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ ms.v.SetMapIndex(rk, reflect.Value{})
+}
+func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value {
+ if _, ok := ms.valConv.(*messageConverter); !ok {
+ panic("invalid Mutable on map with non-message value type")
+ }
+ v := ms.Get(k)
+ if !v.IsValid() {
+ v = ms.NewValue()
+ ms.Set(k, v)
+ }
+ return v
+}
+func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
+ iter := mapRange(ms.v)
+ for iter.Next() {
+ k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
+ v := ms.valConv.PBValueOf(iter.Value())
+ if !f(k, v) {
+ return
+ }
+ }
+}
+func (ms *mapReflect) NewValue() pref.Value {
+ return ms.valConv.New()
+}
+func (ms *mapReflect) IsValid() bool {
+ return !ms.v.IsNil()
+}
+func (ms *mapReflect) protoUnwrap() interface{} {
+ return ms.v.Interface()
+}
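mapReflect.Range delegates to a mapRange helper that walks the underlying map through reflection. A standalone sketch of the same iteration using reflect.Value.MapRange directly is shown below; the toy map[string]int32 is an assumption, and iteration order is not deterministic.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int32{"a": 1, "b": 2}
	rv := reflect.ValueOf(m)

	// Iterate the map through reflection; breaking out of the loop
	// corresponds to the Range callback returning false.
	iter := rv.MapRange()
	for iter.Next() {
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}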
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
new file mode 100644
index 000000000..85ba1d3b3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -0,0 +1,274 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math/bits"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type unmarshalOptions struct {
+ flags protoiface.UnmarshalInputFlags
+ resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+func (o unmarshalOptions) Options() proto.UnmarshalOptions {
+ return proto.UnmarshalOptions{
+ Merge: true,
+ AllowPartial: true,
+ DiscardUnknown: o.DiscardUnknown(),
+ Resolver: o.resolver,
+ }
+}
+
+func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 }
+
+func (o unmarshalOptions) IsDefault() bool {
+ return o.flags == 0 && o.resolver == preg.GlobalTypes
+}
+
+var lazyUnmarshalOptions = unmarshalOptions{
+ resolver: preg.GlobalTypes,
+}
+
+type unmarshalOutput struct {
+ n int // number of bytes consumed
+ initialized bool
+}
+
+// unmarshal is protoreflect.Methods.Unmarshal.
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ var flags piface.UnmarshalOutputFlags
+ if out.initialized {
+ flags |= piface.UnmarshalInitialized
+ }
+ return piface.UnmarshalOutput{
+ Flags: flags,
+ }, err
+}
+
+// errUnknown is returned during unmarshaling to indicate a parse error that
+// should result in a field being placed in the unknown fields section (for example,
+// when the wire type doesn't match) as opposed to the entire unmarshal operation
+// failing (for example, when a field extends past the available input).
+//
+// This is a sentinel error which should never be visible to the user.
+var errUnknown = errors.New("unknown")
+
+func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ mi.init()
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return unmarshalMessageSet(mi, b, p, opts)
+ }
+ initialized := true
+ var requiredMask uint64
+ var exts *map[int32]ExtensionField
+ start := len(b)
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, errors.New("invalid field number")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if num != groupTag {
+ return out, errors.New("mismatching end group marker")
+ }
+ groupTag = 0
+ break
+ }
+
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ var n int
+ err := errUnknown
+ switch {
+ case f != nil:
+ if f.funcs.unmarshal == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+ n = o.n
+ if err != nil {
+ break
+ }
+ requiredMask |= f.validation.requiredBit
+ if f.funcs.isInit != nil && !o.initialized {
+ initialized = false
+ }
+ default:
+ // Possible extension.
+ if exts == nil && mi.extensionOffset.IsValid() {
+ exts = p.Apply(mi.extensionOffset).Extensions()
+ if *exts == nil {
+ *exts = make(map[int32]ExtensionField)
+ }
+ }
+ if exts == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+ if err != nil {
+ break
+ }
+ n = o.n
+ if !o.initialized {
+ initialized = false
+ }
+ }
+ if err != nil {
+ if err != errUnknown {
+ return out, err
+ }
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+ u := p.Apply(mi.unknownOffset).Bytes()
+ *u = protowire.AppendTag(*u, num, wtyp)
+ *u = append(*u, b[:n]...)
+ }
+ }
+ b = b[n:]
+ }
+ if groupTag != 0 {
+ return out, errors.New("missing end group marker")
+ }
+ if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+ initialized = false
+ }
+ if initialized {
+ out.initialized = true
+ }
+ out.n = start - len(b)
+ return out, nil
+}
+
+func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ x := exts[int32(num)]
+ xt := x.Type()
+ if xt == nil {
+ var err error
+ xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num)
+ if err != nil {
+ if err == preg.NotFound {
+ return out, errUnknown
+ }
+ return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err)
+ }
+ }
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.unmarshal == nil {
+ return out, errUnknown
+ }
+ if flags.LazyUnmarshalExtensions {
+ if opts.IsDefault() && x.canLazy(xt) {
+ out, valid := skipExtension(b, xi, num, wtyp, opts)
+ switch valid {
+ case ValidationValid:
+ if out.initialized {
+ x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n])
+ exts[int32(num)] = x
+ return out, nil
+ }
+ case ValidationInvalid:
+ return out, errors.New("invalid wire format")
+ case ValidationUnknown:
+ }
+ }
+ }
+ ival := x.Value()
+ if !ival.IsValid() && xi.unmarshalNeedsValue {
+ // Create a new message, list, or map value to fill in.
+ // For enums, create a prototype value to let the unmarshal func know the
+ // concrete type.
+ ival = xt.New()
+ }
+ v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts)
+ if err != nil {
+ return out, err
+ }
+ if xi.funcs.isInit == nil {
+ out.initialized = true
+ }
+ x.Set(xt, v)
+ exts[int32(num)] = x
+ return out, nil
+}
+
+func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+ if xi.validation.mi == nil {
+ return out, ValidationUnknown
+ }
+ xi.validation.mi.init()
+ switch xi.validation.typ {
+ case validationTypeMessage:
+ if wtyp != protowire.BytesType {
+ return out, ValidationUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(v, 0, opts)
+ out.n = n
+ return out, st
+ case validationTypeGroup:
+ if wtyp != protowire.StartGroupType {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(b, num, opts)
+ return out, st
+ default:
+ return out, ValidationUnknown
+ }
+}
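The tag-parsing fast path in unmarshalPointer decodes one- and two-byte varints inline and only falls back to protowire.ConsumeVarint for longer tags, then splits the result into field number (tag >> 3) and wire type (tag & 7). A self-contained sketch of that split on a hand-written buffer follows; the example bytes are illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := []byte{0x08, 0x96, 0x01} // field 1, varint wire type, value 150

	var tag uint64
	if b[0] < 0x80 {
		// One-byte fast path, as in unmarshalPointer.
		tag = uint64(b[0])
		b = b[1:]
	} else {
		var n int
		tag, n = protowire.ConsumeVarint(b)
		b = b[n:]
	}
	num, wtyp := protowire.Number(tag>>3), protowire.Type(tag&7)

	val, _ := protowire.ConsumeVarint(b)
	fmt.Println(num, wtyp, val) // 1 0 150
}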
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
new file mode 100644
index 000000000..8c8a794c6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -0,0 +1,199 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/flags"
+ proto "google.golang.org/protobuf/proto"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type marshalOptions struct {
+ flags piface.MarshalInputFlags
+}
+
+func (o marshalOptions) Options() proto.MarshalOptions {
+ return proto.MarshalOptions{
+ AllowPartial: true,
+ Deterministic: o.Deterministic(),
+ UseCachedSize: o.UseCachedSize(),
+ }
+}
+
+func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 }
+func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 }
+
+// size is protoreflect.Methods.Size.
+func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ size := mi.sizePointer(p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.SizeOutput{Size: size}
+}
+
+func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
+ mi.init()
+ if p.IsNil() {
+ return 0
+ }
+ if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
+ return int(size)
+ }
+ }
+ return mi.sizePointerSlow(p, opts)
+}
+
+func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) {
+ if flags.ProtoLegacy && mi.isMessageSet {
+ size = sizeMessageSet(mi, p, opts)
+ if mi.sizecacheOffset.IsValid() {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ return size
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ size += mi.sizeExtensions(e, opts)
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.size == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ size += f.funcs.size(fptr, f, opts)
+ }
+ if mi.unknownOffset.IsValid() {
+ u := *p.Apply(mi.unknownOffset).Bytes()
+ size += len(u)
+ }
+ if mi.sizecacheOffset.IsValid() {
+ if size > math.MaxInt32 {
+ // The size is too large for the int32 sizecache field.
+ // We will need to recompute the size when encoding;
+ // unfortunately expensive, but better than invalid output.
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+ } else {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ }
+ return size
+}
+
+// marshal is protoreflect.Methods.Marshal.
+func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.MarshalOutput{Buf: b}, err
+}
+
+func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ mi.init()
+ if p.IsNil() {
+ return b, nil
+ }
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return marshalMessageSet(mi, b, p, opts)
+ }
+ var err error
+ // The old marshaler encodes extensions at the beginning.
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ // TODO: Special handling for MessageSet?
+ b, err = mi.appendExtensions(b, e, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.marshal == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ if mi.unknownOffset.IsValid() && !mi.isMessageSet {
+ u := *p.Apply(mi.unknownOffset).Bytes()
+ b = append(b, u...)
+ }
+ return b, nil
+}
+
+func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
+ if ext == nil {
+ return 0
+ }
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ n += xi.funcs.size(x.Value(), xi.tagsize, opts)
+ }
+ return n
+}
+
+func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) {
+ if ext == nil {
+ return b, nil
+ }
+
+ switch len(*ext) {
+ case 0:
+ return b, nil
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ var err error
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ }
+ return b, err
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(*ext))
+ for k := range *ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ var err error
+ for _, k := range keys {
+ x := (*ext)[int32(k)]
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
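appendExtensions keeps its output deterministic by sorting the extension field numbers before visiting the map. The same trick in isolation, with an illustrative map standing in for *ext:

package main

import (
	"fmt"
	"sort"
)

func main() {
	ext := map[int32]string{1001: "a", 100: "b", 5000: "c"}

	// Collect and sort the keys as ints, then visit the map in that order.
	keys := make([]int, 0, len(ext))
	for k := range ext {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)

	for _, k := range keys {
		fmt.Println(k, ext[int32(k)]) // 100, 1001, 5000 — stable across runs
	}
}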
diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go
new file mode 100644
index 000000000..8c1eab4bf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type EnumInfo struct {
+ GoReflectType reflect.Type // int32 kind
+ Desc pref.EnumDescriptor
+}
+
+func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum {
+ return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum)
+}
+func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
new file mode 100644
index 000000000..e904fd993
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+ "sync"
+ "sync/atomic"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ExtensionInfo implements ExtensionType.
+//
+// This type contains a number of exported fields for legacy compatibility.
+// The only non-deprecated use of this type is through the methods of the
+// ExtensionType interface.
+type ExtensionInfo struct {
+ // An ExtensionInfo may exist in several stages of initialization.
+ //
+ // extensionInfoUninitialized: Some or all of the legacy exported
+ // fields may be set, but none of the unexported fields have been
+ // initialized. This is the starting state for an ExtensionInfo
+ // in legacy generated code.
+ //
+ // extensionInfoDescInit: The desc field is set, but other unexported fields
+ // may not be initialized. Legacy exported fields may or may not be set.
+ // This is the starting state for an ExtensionInfo in newly generated code.
+ //
+ // extensionInfoFullInit: The ExtensionInfo is fully initialized.
+ // This state is only entered after lazy initialization is complete.
+ init uint32
+ mu sync.Mutex
+
+ goType reflect.Type
+ desc extensionTypeDescriptor
+ conv Converter
+ info *extensionFieldInfo // for fast-path method implementations
+
+ // ExtendedType is a typed nil-pointer to the parent message type that
+ // is being extended. It is possible for this to be unpopulated in v2
+ // since the message may no longer implement the MessageV1 interface.
+ //
+ // Deprecated: Use the ExtendedType method instead.
+ ExtendedType piface.MessageV1
+
+ // ExtensionType is the zero value of the extension type.
+ //
+ // For historical reasons, reflect.TypeOf(ExtensionType) and the
+ // type returned by InterfaceOf may not be identical.
+ //
+ // Deprecated: Use InterfaceOf(xt.Zero()) instead.
+ ExtensionType interface{}
+
+ // Field is the field number of the extension.
+ //
+ // Deprecated: Use the Descriptor().Number method instead.
+ Field int32
+
+ // Name is the fully qualified name of the extension.
+ //
+ // Deprecated: Use the Descriptor().FullName method instead.
+ Name string
+
+ // Tag is the protobuf struct tag used in the v1 API.
+ //
+ // Deprecated: Do not use.
+ Tag string
+
+ // Filename is the proto filename in which the extension is defined.
+ //
+ // Deprecated: Use Descriptor().ParentFile().Path() instead.
+ Filename string
+}
+
+// Stages of initialization: See the ExtensionInfo.init field.
+const (
+ extensionInfoUninitialized = 0
+ extensionInfoDescInit = 1
+ extensionInfoFullInit = 2
+)
+
+func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) {
+ xi.goType = goType
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ xi.init = extensionInfoDescInit
+}
+
+func (xi *ExtensionInfo) New() pref.Value {
+ return xi.lazyInit().New()
+}
+func (xi *ExtensionInfo) Zero() pref.Value {
+ return xi.lazyInit().Zero()
+}
+func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value {
+ return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} {
+ return xi.lazyInit().GoValueOf(v).Interface()
+}
+func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool {
+ return xi.lazyInit().IsValidPB(v)
+}
+func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+ return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor {
+ if atomic.LoadUint32(&xi.init) < extensionInfoDescInit {
+ xi.lazyInitSlow()
+ }
+ return &xi.desc
+}
+
+func (xi *ExtensionInfo) lazyInit() Converter {
+ if atomic.LoadUint32(&xi.init) < extensionInfoFullInit {
+ xi.lazyInitSlow()
+ }
+ return xi.conv
+}
+
+func (xi *ExtensionInfo) lazyInitSlow() {
+ xi.mu.Lock()
+ defer xi.mu.Unlock()
+
+ if xi.init == extensionInfoFullInit {
+ return
+ }
+ defer atomic.StoreUint32(&xi.init, extensionInfoFullInit)
+
+ if xi.desc.ExtensionDescriptor == nil {
+ xi.initFromLegacy()
+ }
+ if !xi.desc.ExtensionDescriptor.IsPlaceholder() {
+ if xi.ExtensionType == nil {
+ xi.initToLegacy()
+ }
+ xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor)
+ xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor)
+ xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType)
+ }
+}
+
+type extensionTypeDescriptor struct {
+ pref.ExtensionDescriptor
+ xi *ExtensionInfo
+}
+
+func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType {
+ return xtd.xi
+}
+func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor {
+ return xtd.ExtensionDescriptor
+}
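ExtensionInfo.lazyInit uses the classic double-checked pattern: an atomic load on the fast path, then a mutex plus a re-check before doing the expensive initialization exactly once. A generic sketch of that pattern follows; the lazy type and its string payload are illustrative and not part of this package.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazy struct {
	init uint32
	mu   sync.Mutex
	v    string
}

func (l *lazy) get() string {
	if atomic.LoadUint32(&l.init) == 0 {
		l.mu.Lock()
		// Re-check under the lock: another goroutine may have won the race.
		if l.init == 0 {
			l.v = "initialized"
			atomic.StoreUint32(&l.init, 1)
		}
		l.mu.Unlock()
	}
	return l.v
}

func main() {
	var l lazy
	fmt.Println(l.get())
}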
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
new file mode 100644
index 000000000..f7d7ffb51
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// legacyEnumName returns the name of enums used in legacy code.
+// It is neither the protobuf full name nor the qualified Go name,
+// but rather an odd hybrid of both.
+func legacyEnumName(ed pref.EnumDescriptor) string {
+ var protoPkg string
+ enumName := string(ed.FullName())
+ if fd := ed.ParentFile(); fd != nil {
+ protoPkg = string(fd.Package())
+ enumName = strings.TrimPrefix(enumName, protoPkg+".")
+ }
+ if protoPkg == "" {
+ return strs.GoCamelCase(enumName)
+ }
+ return protoPkg + "." + strs.GoCamelCase(enumName)
+}
+
+// legacyWrapEnum wraps v as a protoreflect.Enum,
+// where v must be an int32 kind and not implement the v2 API already.
+func legacyWrapEnum(v reflect.Value) pref.Enum {
+ et := legacyLoadEnumType(v.Type())
+ return et.New(pref.EnumNumber(v.Int()))
+}
+
+var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType
+
+// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t,
+// where t must be an int32 kind and not implement the v2 API already.
+func legacyLoadEnumType(t reflect.Type) pref.EnumType {
+ // Fast-path: check if an EnumType is cached for this concrete type.
+ if et, ok := legacyEnumTypeCache.Load(t); ok {
+ return et.(pref.EnumType)
+ }
+
+ // Slow-path: derive enum descriptor and initialize EnumType.
+ var et pref.EnumType
+ ed := LegacyLoadEnumDesc(t)
+ et = &legacyEnumType{
+ desc: ed,
+ goType: t,
+ }
+ if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok {
+ return et.(pref.EnumType)
+ }
+ return et
+}
+
+type legacyEnumType struct {
+ desc pref.EnumDescriptor
+ goType reflect.Type
+ m sync.Map // map[protoreflect.EnumNumber]proto.Enum
+}
+
+func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum {
+ if e, ok := t.m.Load(n); ok {
+ return e.(pref.Enum)
+ }
+ e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType}
+ t.m.Store(n, e)
+ return e
+}
+func (t *legacyEnumType) Descriptor() pref.EnumDescriptor {
+ return t.desc
+}
+
+type legacyEnumWrapper struct {
+ num pref.EnumNumber
+ pbTyp pref.EnumType
+ goTyp reflect.Type
+}
+
+func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor {
+ return e.pbTyp.Descriptor()
+}
+func (e *legacyEnumWrapper) Type() pref.EnumType {
+ return e.pbTyp
+}
+func (e *legacyEnumWrapper) Number() pref.EnumNumber {
+ return e.num
+}
+func (e *legacyEnumWrapper) ProtoReflect() pref.Enum {
+ return e
+}
+func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+ v := reflect.New(e.goTyp).Elem()
+ v.SetInt(int64(e.num))
+ return v.Interface()
+}
+
+var (
+ _ pref.Enum = (*legacyEnumWrapper)(nil)
+ _ unwrapper = (*legacyEnumWrapper)(nil)
+)
+
+var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must be an int32 kind and not implement the v2 API already.
+//
+// This is exported for testing purposes.
+func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := legacyEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: initialize EnumDescriptor from the raw descriptor.
+ ev := reflect.Zero(t).Interface()
+ if _, ok := ev.(pref.Enum); ok {
+ panic(fmt.Sprintf("%v already implements proto.Enum", t))
+ }
+ edV1, ok := ev.(enumV1)
+ if !ok {
+ return aberrantLoadEnumDesc(t)
+ }
+ b, idxs := edV1.EnumDescriptor()
+
+ var ed pref.EnumDescriptor
+ if len(idxs) == 1 {
+ ed = legacyLoadFileDesc(b).Enums().Get(idxs[0])
+ } else {
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1 : len(idxs)-1] {
+ md = md.Messages().Get(i)
+ }
+ ed = md.Enums().Get(idxs[len(idxs)-1])
+ }
+ if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(protoreflect.EnumDescriptor)
+ }
+ return ed
+}
+
+var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must not implement protoreflect.Enum or enumV1.
+//
+// If the type does not implement enumV1, then there is no reliable
+// way to derive the original protobuf type information.
+// We are unable to use the global enum registry since it is
+// unfortunately keyed by the protobuf full name, which we also do not know.
+// Thus, this produces some bogus enum descriptor based on the Go type name.
+func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := aberrantEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: construct a bogus, but unique EnumDescriptor.
+ ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
+ ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
+ ed.L0.ParentFile = filedesc.SurrogateProto3
+ ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
+
+ // TODO: Use the presence of a UnmarshalJSON method to determine proto2?
+
+ vd := &ed.L2.Values.List[0]
+ vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN
+ vd.L0.ParentFile = ed.L0.ParentFile
+ vd.L0.Parent = ed
+
+ // TODO: We could use the String method to obtain some enum value names by
+ // starting at 0 and printing the enum until it produces invalid identifiers.
+ // An exhaustive query is clearly impractical, but can be best-effort.
+
+ if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type.
+// The provided name is not guaranteed to be stable or universally unique.
+// It should be sufficiently unique within a program.
+//
+// This is exported for testing purposes.
+func AberrantDeriveFullName(t reflect.Type) pref.FullName {
+ sanitize := func(r rune) rune {
+ switch {
+ case r == '/':
+ return '.'
+ case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
+ return r
+ default:
+ return '_'
+ }
+ }
+ prefix := strings.Map(sanitize, t.PkgPath())
+ suffix := strings.Map(sanitize, t.Name())
+ if suffix == "" {
+ suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer())
+ }
+
+ ss := append(strings.Split(prefix, "."), suffix)
+ for i, s := range ss {
+ if s == "" || ('0' <= s[0] && s[0] <= '9') {
+ ss[i] = "x" + s
+ }
+ }
+ return pref.FullName(strings.Join(ss, "."))
+}
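AberrantDeriveFullName builds its bogus-but-unique name by mapping '/' to '.' and any other non-alphanumeric rune to '_' across the package path and type name. A standalone sketch of that mapping, applied to an assumed import path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same rune mapping as the sanitize closure above.
	sanitize := func(r rune) rune {
		switch {
		case r == '/':
			return '.'
		case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
			return r
		default:
			return '_'
		}
	}
	fmt.Println(strings.Map(sanitize, "github.com/user/repo")) // github_com.user.repo
}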
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
new file mode 100644
index 000000000..c3d741c2f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "hash/crc32"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// These functions exist to support exported APIs in generated protobufs.
+// While these are deprecated, they cannot be removed for compatibility reasons.
+
+// LegacyEnumName returns the name of enums used in legacy code.
+func (Export) LegacyEnumName(ed pref.EnumDescriptor) string {
+ return legacyEnumName(ed)
+}
+
+// LegacyMessageTypeOf returns the protoreflect.MessageType for m,
+// with name used as the message name if necessary.
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType {
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageInfo(reflect.TypeOf(m), name)
+}
+
+// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
+// The input can either be a string representing the enum value by name,
+// or a number representing the enum number itself.
+func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) {
+ if b[0] == '"' {
+ var name pref.Name
+ if err := json.Unmarshal(b, &name); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ ev := ed.Values().ByName(name)
+ if ev == nil {
+ return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name)
+ }
+ return ev.Number(), nil
+ } else {
+ var num pref.EnumNumber
+ if err := json.Unmarshal(b, &num); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ return num, nil
+ }
+}
+
+// CompressGZIP compresses the input as a GZIP-encoded file.
+// The current implementation does no compression.
+func (Export) CompressGZIP(in []byte) (out []byte) {
+ // RFC 1952, section 2.3.1.
+ var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff}
+
+ // RFC 1951, section 3.2.4.
+ var blockHeader [5]byte
+ const maxBlockSize = math.MaxUint16
+ numBlocks := 1 + len(in)/maxBlockSize
+
+ // RFC 1952, section 2.3.1.
+ var gzipFooter [8]byte
+ binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in))
+ binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in)))
+
+ // Encode the input without compression using raw DEFLATE blocks.
+ out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter))
+ out = append(out, gzipHeader[:]...)
+ for blockHeader[0] == 0 {
+ blockSize := maxBlockSize
+ if blockSize > len(in) {
+ blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3.
+ blockSize = len(in)
+ }
+ binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000)
+ binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff)
+ out = append(out, blockHeader[:]...)
+ out = append(out, in[:blockSize]...)
+ in = in[blockSize:]
+ }
+ out = append(out, gzipFooter[:]...)
+ return out
+}
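CompressGZIP emits raw stored DEFLATE blocks, so each block header is just the final-block bit followed by LEN and its ones' complement NLEN (RFC 1951, section 3.2.4). A sketch of the header bytes for an assumed 5-byte final block:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var blockHeader [5]byte
	blockSize := 5

	blockHeader[0] = 0x01                                                     // BFINAL=1, BTYPE=00 (stored)
	binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) // LEN
	binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) // NLEN
	fmt.Printf("% x\n", blockHeader[:]) // 01 05 00 fa ff
}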
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
new file mode 100644
index 000000000..61757ce50
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -0,0 +1,175 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (xi *ExtensionInfo) initToLegacy() {
+ xd := xi.desc
+ var parent piface.MessageV1
+ messageName := xd.ContainingMessage().FullName()
+ if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil {
+ // Create a new parent message and unwrap it if possible.
+ mv := mt.New().Interface()
+ t := reflect.TypeOf(mv)
+ if mv, ok := mv.(unwrapper); ok {
+ t = reflect.TypeOf(mv.protoUnwrap())
+ }
+
+ // Check whether the message implements the legacy v1 Message interface.
+ mz := reflect.Zero(t).Interface()
+ if mz, ok := mz.(piface.MessageV1); ok {
+ parent = mz
+ }
+ }
+
+ // Determine the v1 extension type, which is unfortunately not the same as
+ // the v2 ExtensionType.GoType.
+ extType := xi.goType
+ switch extType.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields
+ }
+
+ // Reconstruct the legacy enum full name.
+ var enumName string
+ if xd.Kind() == pref.EnumKind {
+ enumName = legacyEnumName(xd.Enum())
+ }
+
+ // Derive the proto file that the extension was declared within.
+ var filename string
+ if fd := xd.ParentFile(); fd != nil {
+ filename = fd.Path()
+ }
+
+ // For MessageSet extensions, the name used is the parent message.
+ name := xd.FullName()
+ if messageset.IsMessageSetExtension(xd) {
+ name = name.Parent()
+ }
+
+ xi.ExtendedType = parent
+ xi.ExtensionType = reflect.Zero(extType).Interface()
+ xi.Field = int32(xd.Number())
+ xi.Name = string(name)
+ xi.Tag = ptag.Marshal(xd, enumName)
+ xi.Filename = filename
+}
+
+// initFromLegacy initializes an ExtensionInfo from
+// the contents of the deprecated exported fields of the type.
+func (xi *ExtensionInfo) initFromLegacy() {
+ // The v1 API returns "type incomplete" descriptors where only the
+ // field number is specified. In such a case, use a placeholder.
+ if xi.ExtendedType == nil || xi.ExtensionType == nil {
+ xd := placeholderExtension{
+ name: pref.FullName(xi.Name),
+ number: pref.FieldNumber(xi.Field),
+ }
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ return
+ }
+
+ // Resolve enum or message dependencies.
+ var ed pref.EnumDescriptor
+ var md pref.MessageDescriptor
+ t := reflect.TypeOf(xi.ExtensionType)
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ ed = v.Descriptor()
+ case enumV1:
+ ed = LegacyLoadEnumDesc(t)
+ case pref.ProtoMessage:
+ md = v.ProtoReflect().Descriptor()
+ case messageV1:
+ md = LegacyLoadMessageDesc(t)
+ }
+
+ // Derive basic field information from the struct tag.
+ var evs pref.EnumValueDescriptors
+ if ed != nil {
+ evs = ed.Values()
+ }
+ fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field)
+
+ // Construct a v2 ExtensionType.
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)}
+ xd.L0.ParentFile = filedesc.SurrogateProto2
+ xd.L0.FullName = pref.FullName(xi.Name)
+ xd.L1.Number = pref.FieldNumber(xi.Field)
+ xd.L1.Cardinality = fd.L1.Cardinality
+ xd.L1.Kind = fd.L1.Kind
+ xd.L2.IsPacked = fd.L1.IsPacked
+ xd.L2.Default = fd.L1.Default
+ xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
+ xd.L2.Enum = ed
+ xd.L2.Message = md
+
+ // Derive real extension field name for MessageSets.
+ if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName {
+ xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName)
+ }
+
+ tt := reflect.TypeOf(xi.ExtensionType)
+ if isOptional {
+ tt = tt.Elem()
+ }
+ xi.goType = tt
+ xi.desc = extensionTypeDescriptor{xd, xi}
+}
+
+type placeholderExtension struct {
+ name pref.FullName
+ number pref.FieldNumber
+}
+
+func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil }
+func (x placeholderExtension) Parent() pref.Descriptor { return nil }
+func (x placeholderExtension) Index() int { return 0 }
+func (x placeholderExtension) Syntax() pref.Syntax { return 0 }
+func (x placeholderExtension) Name() pref.Name { return x.name.Name() }
+func (x placeholderExtension) FullName() pref.FullName { return x.name }
+func (x placeholderExtension) IsPlaceholder() bool { return true }
+func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field }
+func (x placeholderExtension) Number() pref.FieldNumber { return x.number }
+func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 }
+func (x placeholderExtension) Kind() pref.Kind { return 0 }
+func (x placeholderExtension) HasJSONName() bool { return false }
+func (x placeholderExtension) JSONName() string { return "" }
+func (x placeholderExtension) HasPresence() bool { return false }
+func (x placeholderExtension) HasOptionalKeyword() bool { return false }
+func (x placeholderExtension) IsExtension() bool { return true }
+func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsPacked() bool { return false }
+func (x placeholderExtension) IsList() bool { return false }
+func (x placeholderExtension) IsMap() bool { return false }
+func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) HasDefault() bool { return false }
+func (x placeholderExtension) Default() pref.Value { return pref.Value{} }
+func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil }
+func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil }
+func (x placeholderExtension) Message() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return }
+func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
new file mode 100644
index 000000000..9ab091086
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -0,0 +1,81 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Every enum and message type generated by protoc-gen-go since commit 2fc053c5
+// on February 25th, 2016 has had a method to get the raw descriptor.
+// Types that were not generated by protoc-gen-go or were generated prior
+// to that version are not supported.
+//
+// The []byte returned is the encoded form of a FileDescriptorProto message
+// compressed using GZIP. The []int is the path from the top-level file
+// to the specific message or enum declaration.
+type (
+ enumV1 interface {
+ EnumDescriptor() ([]byte, []int)
+ }
+ messageV1 interface {
+ Descriptor() ([]byte, []int)
+ }
+)
+
+var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor
+
+// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message.
+//
+// This assumes that b is immutable and that b does not refer to part of a
+// concatenated series of GZIP files (which would require shenanigans that
+// rely on the concatenation properties of both protobufs and GZIP).
+// File descriptors generated by protoc-gen-go do not rely on that property.
+func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
+ // Fast-path: check whether we already have a cached file descriptor.
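+ // The cache key is the address of b's first byte, which is a stable identity
+ // for the raw descriptor since b is assumed to be immutable.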
+ if fd, ok := legacyFileDescCache.Load(&b[0]); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+
+ // Slow-path: decompress and unmarshal the file descriptor proto.
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ panic(err)
+ }
+ b2, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(err)
+ }
+
+ fd := filedesc.Builder{
+ RawDescriptor: b2,
+ FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry
+ }.Build().File
+ if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+ return fd
+}
+
+type resolverOnly struct {
+ reg *protoregistry.Files
+}
+
+func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ return r.reg.FindFileByPath(path)
+}
+func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ return r.reg.FindDescriptorByName(name)
+}
+func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
new file mode 100644
index 000000000..06c68e117
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -0,0 +1,502 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descopts"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// legacyWrapMessage wraps v as a protoreflect.Message,
+// where v must be a *struct kind and not implement the v2 API already.
+func legacyWrapMessage(v reflect.Value) pref.Message {
+ typ := v.Type()
+ if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
+ return aberrantMessage{v: v}
+ }
+ mt := legacyLoadMessageInfo(typ, "")
+ return mt.MessageOf(v.Interface())
+}
+
+var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
+
+// legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
+// where t must be a *struct kind and not implement the v2 API already.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
+ // Fast-path: check if a MessageInfo is cached for this concrete type.
+ if mt, ok := legacyMessageTypeCache.Load(t); ok {
+ return mt.(*MessageInfo)
+ }
+
+ // Slow-path: derive message descriptor and initialize MessageInfo.
+ mi := &MessageInfo{
+ Desc: legacyLoadMessageDesc(t, name),
+ GoReflectType: t,
+ }
+
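+ // Wire up optional fast-path methods when the legacy type provides its own
+ // Marshal, Unmarshal, or Merge implementation.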
+ v := reflect.Zero(t).Interface()
+ if _, ok := v.(legacyMarshaler); ok {
+ mi.methods.Marshal = legacyMarshal
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ }
+ if _, ok := v.(legacyUnmarshaler); ok {
+ mi.methods.Unmarshal = legacyUnmarshal
+ }
+ if _, ok := v.(legacyMerger); ok {
+ mi.methods.Merge = legacyMerge
+ }
+
+ if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok {
+ return mi.(*MessageInfo)
+ }
+ return mi
+}
+
+var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor
+
+// LegacyLoadMessageDesc returns a MessageDescriptor derived from the Go type,
+// which must be a *struct kind and must not already implement the v2 API.
+//
+// This is exported for testing purposes.
+func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
+ return legacyLoadMessageDesc(t, "")
+}
+func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ // Fast-path: check if a MessageDescriptor is cached for this concrete type.
+ if mi, ok := legacyMessageDescCache.Load(t); ok {
+ return mi.(pref.MessageDescriptor)
+ }
+
+ // Slow-path: initialize MessageDescriptor from the raw descriptor.
+ mv := reflect.Zero(t).Interface()
+ if _, ok := mv.(pref.ProtoMessage); ok {
+ panic(fmt.Sprintf("%v already implements proto.Message", t))
+ }
+ mdV1, ok := mv.(messageV1)
+ if !ok {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If this is a dynamic message type where there isn't a 1-1 mapping between
+ // Go and protobuf types, calling the Descriptor method on the zero value of
+ // the message type isn't likely to work. If it panics, swallow the panic and
+ // continue as if the Descriptor method wasn't present.
+ b, idxs := func() ([]byte, []int) {
+ defer func() {
+ recover()
+ }()
+ return mdV1.Descriptor()
+ }()
+ if b == nil {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If the Go type has no fields, then this might be a proto3 empty message
+ // from before the size cache was added. If there are any fields, check to
+ // see that at least one of them looks like something we generated.
+ if nfield := t.Elem().NumField(); nfield > 0 {
+ hasProtoField := false
+ for i := 0; i < nfield; i++ {
+ f := t.Elem().Field(i)
+ if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
+ hasProtoField = true
+ break
+ }
+ }
+ if !hasProtoField {
+ return aberrantLoadMessageDesc(t, name)
+ }
+ }
+
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1:] {
+ md = md.Messages().Get(i)
+ }
+ if name != "" && md.FullName() != name {
+ panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name))
+ }
+ if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok {
+ return md.(protoreflect.MessageDescriptor)
+ }
+ return md
+}
+
+var (
+ aberrantMessageDescLock sync.Mutex
+ aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor
+)
+
+// aberrantLoadMessageDesc returns a MessageDescriptor derived from the Go type,
+// which must not implement protoreflect.ProtoMessage or messageV1.
+//
+// This is a best-effort derivation of the message descriptor using the protobuf
+// tags on the struct fields.
+func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ aberrantMessageDescLock.Lock()
+ defer aberrantMessageDescLock.Unlock()
+ if aberrantMessageDescCache == nil {
+ aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor)
+ }
+ return aberrantLoadMessageDescReentrant(t, name)
+}
+func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ // Fast-path: check if a MessageDescriptor is cached for this concrete type.
+ if md, ok := aberrantMessageDescCache[t]; ok {
+ return md
+ }
+
+ // Slow-path: construct a descriptor from the Go struct type (best-effort).
+ // Cache the MessageDescriptor early on so that we can resolve internal
+ // cyclic references.
+ md := &filedesc.Message{L2: new(filedesc.MessageL2)}
+ md.L0.FullName = aberrantDeriveMessageName(t, name)
+ md.L0.ParentFile = filedesc.SurrogateProto2
+ aberrantMessageDescCache[t] = md
+
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return md
+ }
+
+ // Try to determine if the message is using proto3 by checking scalars.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ switch f.Type.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ for _, s := range strings.Split(tag, ",") {
+ if s == "proto3" {
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ }
+ }
+ }
+
+ // Obtain a list of oneof wrapper types.
+ var oneofWrappers []reflect.Type
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := t.MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ for _, v := range vs {
+ oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
+ }
+ }
+ }
+ }
+ }
+
+ // Obtain a list of the extension ranges.
+ if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
+ vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
+ for i := 0; i < vs.Len(); i++ {
+ v := vs.Index(i)
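+ // The legacy ranges report an inclusive End, so add 1 to produce the
+ // half-open form used by filedesc.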
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{
+ pref.FieldNumber(v.FieldByName("Start").Int()),
+ pref.FieldNumber(v.FieldByName("End").Int() + 1),
+ })
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
+ }
+ }
+
+ // Derive the message fields by inspecting the struct fields.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ tagKey := f.Tag.Get("protobuf_key")
+ tagVal := f.Tag.Get("protobuf_val")
+ aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
+ }
+ if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
+ n := len(md.L2.Oneofs.List)
+ md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
+ od := &md.L2.Oneofs.List[n]
+ od.L0.FullName = md.FullName().Append(pref.Name(tag))
+ od.L0.ParentFile = md.L0.ParentFile
+ od.L0.Parent = md
+ od.L0.Index = n
+
+ for _, t := range oneofWrappers {
+ if t.Implements(f.Type) {
+ f := t.Elem().Field(0)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ aberrantAppendField(md, f.Type, tag, "", "")
+ fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
+ fd.L1.ContainingOneof = od
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ }
+ }
+ }
+ }
+ }
+
+ return md
+}
+
+func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName {
+ if name.IsValid() {
+ return name
+ }
+ func() {
+ defer func() { recover() }() // swallow possible nil panics
+ if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
+ name = pref.FullName(m.XXX_MessageName())
+ }
+ }()
+ if name.IsValid() {
+ return name
+ }
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return AberrantDeriveFullName(t)
+}
+
+func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
+ t := goType
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
+
+ // Append field descriptor to the message.
+ n := len(md.L2.Fields.List)
+ md.L2.Fields.List = append(md.L2.Fields.List, *fd)
+ fd = &md.L2.Fields.List[n]
+ fd.L0.FullName = md.FullName().Append(fd.Name())
+ fd.L0.ParentFile = md.L0.ParentFile
+ fd.L0.Parent = md
+ fd.L0.Index = n
+
+ if fd.L1.IsWeak || fd.L1.HasPacked {
+ fd.L1.Options = func() pref.ProtoMessage {
+ opts := descopts.Field.ProtoReflect().New()
+ if fd.L1.IsWeak {
+ opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
+ }
+ if fd.L1.HasPacked {
+ opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+ }
+ return opts.Interface()
+ }
+ }
+
+ // Populate Enum and Message.
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ fd.L1.Enum = v.Descriptor()
+ default:
+ fd.L1.Enum = LegacyLoadEnumDesc(t)
+ }
+ }
+ if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.ProtoMessage:
+ fd.L1.Message = v.ProtoReflect().Descriptor()
+ case messageV1:
+ fd.L1.Message = LegacyLoadMessageDesc(t)
+ default:
+ if t.Kind() == reflect.Map {
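+ // Synthesize a nested map-entry message for map fields, mirroring how the
+ // compiler represents maps in descriptors.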
+ n := len(md.L1.Messages.List)
+ md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
+ md2 := &md.L1.Messages.List[n]
+ md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name()))))
+ md2.L0.ParentFile = md.L0.ParentFile
+ md2.L0.Parent = md
+ md2.L0.Index = n
+
+ md2.L1.IsMapEntry = true
+ md2.L2.Options = func() pref.ProtoMessage {
+ opts := descopts.Message.ProtoReflect().New()
+ opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
+ return opts.Interface()
+ }
+
+ aberrantAppendField(md2, t.Key(), tagKey, "", "")
+ aberrantAppendField(md2, t.Elem(), tagVal, "", "")
+
+ fd.L1.Message = md2
+ break
+ }
+ fd.L1.Message = aberrantLoadMessageDescReentrant(t, "")
+ }
+ }
+}
+
+type placeholderEnumValues struct {
+ protoreflect.EnumValueDescriptors
+}
+
+func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor {
+ return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
+}
+
+// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder.
+type legacyMarshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder.
+type legacyUnmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder.
+type legacyMerger interface {
+ Merge(protoiface.MessageV1)
+}
+
+var legacyProtoMethods = &piface.Methods{
+ Marshal: legacyMarshal,
+ Unmarshal: legacyUnmarshal,
+ Merge: legacyMerge,
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ Flags: piface.SupportMarshalDeterministic,
+}
+
+func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ marshaler, ok := v.(legacyMarshaler)
+ if !ok {
+ return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ }
+ out, err := marshaler.Marshal()
+ if in.Buf != nil {
+ out = append(in.Buf, out...)
+ }
+ return piface.MarshalOutput{
+ Buf: out,
+ }, err
+}
+
+func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ unmarshaler, ok := v.(legacyUnmarshaler)
+ if !ok {
+ return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
+ }
+ return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
+}
+
+func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+ dstv := in.Destination.(unwrapper).protoUnwrap()
+ merger, ok := dstv.(legacyMerger)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+// aberrantMessageType implements MessageType for all types other than pointer-to-struct.
+type aberrantMessageType struct {
+ t reflect.Type
+}
+
+func (mt aberrantMessageType) New() pref.Message {
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) Zero() pref.Message {
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) GoType() reflect.Type {
+ return mt.t
+}
+func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(mt.t)
+}
+
+// aberrantMessage implements Message for all types other than pointer-to-struct.
+//
+// When the underlying type implements legacyMarshaler or legacyUnmarshaler,
+// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is
+// not much that can be done with values of this type.
+type aberrantMessage struct {
+ v reflect.Value
+}
+
+func (m aberrantMessage) ProtoReflect() pref.Message {
+ return m
+}
+
+func (m aberrantMessage) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(m.v.Type())
+}
+func (m aberrantMessage) Type() pref.MessageType {
+ return aberrantMessageType{m.v.Type()}
+}
+func (m aberrantMessage) New() pref.Message {
+ return aberrantMessage{reflect.Zero(m.v.Type())}
+}
+func (m aberrantMessage) Interface() pref.ProtoMessage {
+ return m
+}
+func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+}
+func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Clear(pref.FieldDescriptor) {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
+ panic("invalid oneof descriptor")
+}
+func (m aberrantMessage) GetUnknown() pref.RawFields {
+ return nil
+}
+func (m aberrantMessage) SetUnknown(pref.RawFields) {
+ // SetUnknown discards its input on messages which don't support unknown field storage.
+}
+func (m aberrantMessage) IsValid() bool {
+ // An invalid message is a read-only, empty message. Since we don't know anything
+ // about the alleged contents of this message, we can't say with confidence that
+ // it is invalid in this sense. Therefore, report it as valid.
+ return true
+}
+func (m aberrantMessage) ProtoMethods() *piface.Methods {
+ return legacyProtoMethods
+}
+func (m aberrantMessage) protoUnwrap() interface{} {
+ return m.v.Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
new file mode 100644
index 000000000..cdc4267df
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -0,0 +1,176 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type mergeOptions struct{}
+
+func (o mergeOptions) Merge(dst, src proto.Message) {
+ proto.Merge(dst, src)
+}
+
+// merge is protoreflect.Methods.Merge.
+func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput {
+ dp, ok := mi.getPointer(in.Destination)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ sp, ok := mi.getPointer(in.Source)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ mi.mergePointer(dp, sp, mergeOptions{})
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
+ mi.init()
+ if dst.IsNil() {
+ panic(fmt.Sprintf("invalid value: merging into nil message"))
+ }
+ if src.IsNil() {
+ return
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.merge == nil {
+ continue
+ }
+ sfptr := src.Apply(f.offset)
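+ // Skip nil pointer fields in the source; there is nothing to merge from them.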
+ if f.isPointer && sfptr.Elem().IsNil() {
+ continue
+ }
+ f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+ }
+ if mi.extensionOffset.IsValid() {
+ sext := src.Apply(mi.extensionOffset).Extensions()
+ dext := dst.Apply(mi.extensionOffset).Extensions()
+ if *dext == nil {
+ *dext = make(map[int32]ExtensionField)
+ }
+ for num, sx := range *sext {
+ xt := sx.Type()
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.merge == nil {
+ continue
+ }
+ dx := (*dext)[num]
+ var dv pref.Value
+ if dx.Type() == sx.Type() {
+ dv = dx.Value()
+ }
+ if !dv.IsValid() && xi.unmarshalNeedsValue {
+ dv = xt.New()
+ }
+ dv = xi.funcs.merge(dv, sx.Value(), opts)
+ dx.Set(sx.Type(), dv)
+ (*dext)[num] = dx
+ }
+ }
+ if mi.unknownOffset.IsValid() {
+ du := dst.Apply(mi.unknownOffset).Bytes()
+ su := src.Apply(mi.unknownOffset).Bytes()
+ if len(*su) > 0 {
+ *du = append(*du, *su...)
+ }
+ }
+}
+
+func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return src
+}
+
+func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...))
+}
+
+func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ dstl.Append(srcl.Get(i))
+ }
+ return dst
+}
+
+func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sb := srcl.Get(i).Bytes()
+ db := append(emptyBuf[:], sb...)
+ dstl.Append(pref.ValueOfBytes(db))
+ }
+ return dst
+}
+
+func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sm := srcl.Get(i).Message()
+ dm := proto.Clone(sm.Interface()).ProtoReflect()
+ dstl.Append(pref.ValueOfMessage(dm))
+ }
+ return dst
+}
+
+func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ opts.Merge(dst.Message().Interface(), src.Message().Interface())
+ return dst
+}
+
+func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ if f.mi != nil {
+ if dst.Elem().IsNil() {
+ dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ f.mi.mergePointer(dst.Elem(), src.Elem(), opts)
+ } else {
+ dm := dst.AsValueOf(f.ft).Elem()
+ sm := src.AsValueOf(f.ft).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.New(f.ft.Elem()))
+ }
+ opts.Merge(asMessage(dm), asMessage(sm))
+ }
+}
+
+func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ for _, sp := range src.PointerSlice() {
+ dm := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(dm), sp, opts)
+ } else {
+ opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem())))
+ }
+ dst.AppendPointerSlice(pointerOfValue(dm))
+ }
+}
+
+func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...)
+}
+
+func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bytes()
+ if len(v) > 0 {
+ *dst.Bytes() = append(emptyBuf[:], v...)
+ }
+}
+
+func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BytesSlice()
+ for _, v := range *src.BytesSlice() {
+ *ds = append(*ds, append(emptyBuf[:], v...))
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
new file mode 100644
index 000000000..8816c274d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import ()
+
+func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bool() = *src.Bool()
+}
+
+func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bool()
+ if v != false {
+ *dst.Bool() = v
+ }
+}
+
+func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.BoolPtr()
+ if p != nil {
+ v := *p
+ *dst.BoolPtr() = &v
+ }
+}
+
+func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BoolSlice()
+ ss := src.BoolSlice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int32() = *src.Int32()
+}
+
+func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int32()
+ if v != 0 {
+ *dst.Int32() = v
+ }
+}
+
+func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int32Ptr() = &v
+ }
+}
+
+func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int32Slice()
+ ss := src.Int32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint32() = *src.Uint32()
+}
+
+func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint32()
+ if v != 0 {
+ *dst.Uint32() = v
+ }
+}
+
+func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint32Ptr() = &v
+ }
+}
+
+func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint32Slice()
+ ss := src.Uint32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int64() = *src.Int64()
+}
+
+func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int64()
+ if v != 0 {
+ *dst.Int64() = v
+ }
+}
+
+func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int64Ptr() = &v
+ }
+}
+
+func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int64Slice()
+ ss := src.Int64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint64() = *src.Uint64()
+}
+
+func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint64()
+ if v != 0 {
+ *dst.Uint64() = v
+ }
+}
+
+func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint64Ptr() = &v
+ }
+}
+
+func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint64Slice()
+ ss := src.Uint64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float32() = *src.Float32()
+}
+
+func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float32()
+ if v != 0 {
+ *dst.Float32() = v
+ }
+}
+
+func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float32Ptr() = &v
+ }
+}
+
+func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float32Slice()
+ ss := src.Float32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float64() = *src.Float64()
+}
+
+func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float64()
+ if v != 0 {
+ *dst.Float64() = v
+ }
+}
+
+func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float64Ptr() = &v
+ }
+}
+
+func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float64Slice()
+ ss := src.Float64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.String() = *src.String()
+}
+
+func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.String()
+ if v != "" {
+ *dst.String() = v
+ }
+}
+
+func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.StringPtr()
+ if p != nil {
+ v := *p
+ *dst.StringPtr() = &v
+ }
+}
+
+func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.StringSlice()
+ ss := src.StringSlice()
+ *ds = append(*ds, *ss...)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
new file mode 100644
index 000000000..7dd994bd9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/genname"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// MessageInfo provides protobuf related functionality for a given Go type
+// that represents a message. A given instance of MessageInfo is tied to
+// exactly one Go type, which must be a pointer to a struct type.
+//
+// The exported fields must be populated before any methods are called
+// and must not be mutated once set.
+type MessageInfo struct {
+ // GoReflectType is the underlying message Go type and must be populated.
+ GoReflectType reflect.Type // pointer to struct
+
+ // Desc is the underlying message descriptor type and must be populated.
+ Desc pref.MessageDescriptor
+
+ // Exporter must be provided in a purego environment in order to provide
+ // access to unexported fields.
+ Exporter exporter
+
+ // OneofWrappers is list of pointers to oneof wrapper struct types.
+ OneofWrappers []interface{}
+
+ initMu sync.Mutex // protects all unexported fields
+ initDone uint32
+
+ reflectMessageInfo // for reflection implementation
+ coderMessageInfo // for fast-path method implementations
+}
+
+// exporter is a function that returns a reference to the ith field of v,
+// where v is a pointer to a struct. It returns nil if it does not support
+// exporting the requested field (e.g., already exported).
+type exporter func(v interface{}, i int) interface{}
+
+// getMessageInfo returns the MessageInfo for any message type that
+// is generated by our implementation of protoc-gen-go (for v2 and on).
+// If it is unable to obtain a MessageInfo, it returns nil.
+func getMessageInfo(mt reflect.Type) *MessageInfo {
+ m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage)
+ if !ok {
+ return nil
+ }
+ mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo })
+ if !ok {
+ return nil
+ }
+ return mr.ProtoMessageInfo()
+}
+
+func (mi *MessageInfo) init() {
+ // This function is called in the hot path. Inline the sync.Once logic,
+ // since allocating a closure for Once.Do is expensive.
+ // Keep init small to ensure that it can be inlined.
+ if atomic.LoadUint32(&mi.initDone) == 0 {
+ mi.initOnce()
+ }
+}
+
+func (mi *MessageInfo) initOnce() {
+ mi.initMu.Lock()
+ defer mi.initMu.Unlock()
+ if mi.initDone == 1 {
+ return
+ }
+
+ t := mi.GoReflectType
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ panic(fmt.Sprintf("got %v, want *struct kind", t))
+ }
+ t = t.Elem()
+
+ si := mi.makeStructInfo(t)
+ mi.makeReflectFuncs(t, si)
+ mi.makeCoderMethods(t, si)
+
+ atomic.StoreUint32(&mi.initDone, 1)
+}
+
+// getPointer returns the pointer for a message, which should have the Go type
+// associated with the MessageInfo. If the message is of a different type,
+// it returns ok==false.
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) {
+ switch m := m.(type) {
+ case *messageState:
+ return m.pointer(), m.messageInfo() == mi
+ case *messageReflectWrapper:
+ return m.pointer(), m.messageInfo() == mi
+ }
+ return pointer{}, false
+}
+
+type (
+ SizeCache = int32
+ WeakFields = map[int32]protoreflect.ProtoMessage
+ UnknownFields = []byte
+ ExtensionFields = map[int32]ExtensionField
+)
+
+var (
+ sizecacheType = reflect.TypeOf(SizeCache(0))
+ weakFieldsType = reflect.TypeOf(WeakFields(nil))
+ unknownFieldsType = reflect.TypeOf(UnknownFields(nil))
+ extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
+)
+
+type structInfo struct {
+ sizecacheOffset offset
+ weakOffset offset
+ unknownOffset offset
+ extensionOffset offset
+
+ fieldsByNumber map[pref.FieldNumber]reflect.StructField
+ oneofsByName map[pref.Name]reflect.StructField
+ oneofWrappersByType map[reflect.Type]pref.FieldNumber
+ oneofWrappersByNumber map[pref.FieldNumber]reflect.Type
+}
+
+func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
+ si := structInfo{
+ sizecacheOffset: invalidOffset,
+ weakOffset: invalidOffset,
+ unknownOffset: invalidOffset,
+ extensionOffset: invalidOffset,
+
+ fieldsByNumber: map[pref.FieldNumber]reflect.StructField{},
+ oneofsByName: map[pref.Name]reflect.StructField{},
+ oneofWrappersByType: map[reflect.Type]pref.FieldNumber{},
+ oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{},
+ }
+
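+ // Scan the struct fields, recording the offsets of the generated bookkeeping
+ // fields and indexing regular fields by their protobuf field number.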
+fieldLoop:
+ for i := 0; i < t.NumField(); i++ {
+ switch f := t.Field(i); f.Name {
+ case genname.SizeCache, genname.SizeCacheA:
+ if f.Type == sizecacheType {
+ si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.WeakFields, genname.WeakFieldsA:
+ if f.Type == weakFieldsType {
+ si.weakOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.UnknownFields, genname.UnknownFieldsA:
+ if f.Type == unknownFieldsType {
+ si.unknownOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB:
+ if f.Type == extensionFieldsType {
+ si.extensionOffset = offsetOf(f, mi.Exporter)
+ }
+ default:
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.fieldsByNumber[pref.FieldNumber(n)] = f
+ continue fieldLoop
+ }
+ }
+ if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 {
+ si.oneofsByName[pref.Name(s)] = f
+ continue fieldLoop
+ }
+ }
+ }
+
+ // Derive a mapping of oneof wrappers to fields.
+ oneofWrappers := mi.OneofWrappers
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ oneofWrappers = vs
+ }
+ }
+ }
+ }
+ for _, v := range oneofWrappers {
+ tf := reflect.TypeOf(v).Elem()
+ f := tf.Field(0)
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.oneofWrappersByType[tf] = pref.FieldNumber(n)
+ si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf
+ break
+ }
+ }
+ }
+
+ return si
+}
+
+func (mi *MessageInfo) New() protoreflect.Message {
+ return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface())
+}
+func (mi *MessageInfo) Zero() protoreflect.Message {
+ return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
+}
+func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
new file mode 100644
index 000000000..0f4b8db76
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -0,0 +1,364 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type reflectMessageInfo struct {
+ fields map[pref.FieldNumber]*fieldInfo
+ oneofs map[pref.Name]*oneofInfo
+
+ // denseFields is a subset of fields where:
+ // 0 < fieldDesc.Number() < len(denseFields)
+ // It provides faster access to the fieldInfo, but may be incomplete.
+ denseFields []*fieldInfo
+
+ // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
+ rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+
+ getUnknown func(pointer) pref.RawFields
+ setUnknown func(pointer, pref.RawFields)
+ extensionMap func(pointer) *extensionMap
+
+ nilMessage atomicNilMessage
+}
+
+// makeReflectFuncs generates the set of functions to support reflection.
+func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) {
+ mi.makeKnownFieldsFunc(si)
+ mi.makeUnknownFieldsFunc(t, si)
+ mi.makeExtensionFieldsFunc(t, si)
+}
+
+// makeKnownFieldsFunc generates functions for operations that can be performed
+// on each protobuf message field. It uses the structInfo derived from the Go
+// struct type to match message fields with struct fields.
+//
+// This code assumes that the struct is well-formed and panics if there are
+// any discrepancies.
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
+ mi.fields = map[pref.FieldNumber]*fieldInfo{}
+ md := mi.Desc
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ var fi fieldInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fd.IsMap():
+ fi = fieldInfoForMap(fd, fs, mi.Exporter)
+ case fd.IsList():
+ fi = fieldInfoForList(fd, fs, mi.Exporter)
+ case fd.IsWeak():
+ fi = fieldInfoForWeakMessage(fd, si.weakOffset)
+ case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind:
+ fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+ default:
+ fi = fieldInfoForScalar(fd, fs, mi.Exporter)
+ }
+ mi.fields[fd.Number()] = &fi
+ }
+
+ mi.oneofs = map[pref.Name]*oneofInfo{}
+ for i := 0; i < md.Oneofs().Len(); i++ {
+ od := md.Oneofs().Get(i)
+ mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
+ }
+
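+ // Build a dense slice indexed directly by field number for fast lookups;
+ // fields whose numbers exceed twice the field count fall back to the fields map.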
+ mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+ for i := 0; i < fds.Len(); i++ {
+ if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+ mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+ }
+ }
+
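+ // Record the field iteration order, collapsing each oneof into a single
+ // entry by skipping past its member fields.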
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() {
+ mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+ i += od.Fields().Len()
+ } else {
+ mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+ i++
+ }
+ }
+}
+
+func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
+ mi.getUnknown = func(pointer) pref.RawFields { return nil }
+ mi.setUnknown = func(pointer, pref.RawFields) { return }
+ if si.unknownOffset.IsValid() {
+ mi.getUnknown = func(p pointer) pref.RawFields {
+ if p.IsNil() {
+ return nil
+ }
+ rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
+ return pref.RawFields(*rv.Interface().(*[]byte))
+ }
+ mi.setUnknown = func(p pointer, b pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
+ *rv.Interface().(*[]byte) = []byte(b)
+ }
+ } else {
+ mi.getUnknown = func(pointer) pref.RawFields {
+ return nil
+ }
+ mi.setUnknown = func(p pointer, _ pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ }
+ }
+}
+
+func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) {
+ if si.extensionOffset.IsValid() {
+ mi.extensionMap = func(p pointer) *extensionMap {
+ if p.IsNil() {
+ return (*extensionMap)(nil)
+ }
+ v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType)
+ return (*extensionMap)(v.Interface().(*map[int32]ExtensionField))
+ }
+ } else {
+ mi.extensionMap = func(pointer) *extensionMap {
+ return (*extensionMap)(nil)
+ }
+ }
+}
+
+type extensionMap map[int32]ExtensionField
+
+func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ if m != nil {
+ for _, x := range *m {
+ xd := x.Type().TypeDescriptor()
+ v := x.Value()
+ if xd.IsList() && v.List().Len() == 0 {
+ continue
+ }
+ if !f(xd, v) {
+ return
+ }
+ }
+ }
+}
+func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) {
+ if m == nil {
+ return false
+ }
+ xd := xt.TypeDescriptor()
+ x, ok := (*m)[int32(xd.Number())]
+ if !ok {
+ return false
+ }
+ switch {
+ case xd.IsList():
+ return x.Value().List().Len() > 0
+ case xd.IsMap():
+ return x.Value().Map().Len() > 0
+ case xd.Message() != nil:
+ return x.Value().Message().IsValid()
+ }
+ return true
+}
+func (m *extensionMap) Clear(xt pref.ExtensionType) {
+ delete(*m, int32(xt.TypeDescriptor().Number()))
+}
+func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if m != nil {
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ }
+ return xt.Zero()
+}
+func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
+ xd := xt.TypeDescriptor()
+ isValid := true
+ switch {
+ case !xt.IsValidValue(v):
+ isValid = false
+ case xd.IsList():
+ isValid = v.List().IsValid()
+ case xd.IsMap():
+ isValid = v.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
+ panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+ }
+
+ if *m == nil {
+ *m = make(map[int32]ExtensionField)
+ }
+ var x ExtensionField
+ x.Set(xt, v)
+ (*m)[int32(xd.Number())] = x
+}
+func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() {
+ panic("invalid Mutable on field with non-composite type")
+ }
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ v := xt.New()
+ m.Set(xt, v)
+ return v
+}
+
+// MessageState is a data structure that is nested as the first field in a
+// concrete message. It provides a way to implement the ProtoReflect method
+// in an allocation-free way without needing to have a shadow Go type generated
+// for every message type. This technique only works using unsafe.
+//
+//
+// Example generated code:
+//
+// type M struct {
+// state protoimpl.MessageState
+//
+// Field1 int32
+// Field2 string
+// Field3 *BarMessage
+// ...
+// }
+//
+// func (m *M) ProtoReflect() protoreflect.Message {
+// mi := &file_fizz_buzz_proto_msgInfos[5]
+// if protoimpl.UnsafeEnabled && m != nil {
+// ms := protoimpl.X.MessageStateOf(Pointer(m))
+// if ms.LoadMessageInfo() == nil {
+// ms.StoreMessageInfo(mi)
+// }
+// return ms
+// }
+// return mi.MessageOf(m)
+// }
+//
+// The MessageState type holds a *MessageInfo, which must be atomically set to
+// the message info associated with a given message instance.
+// By unsafely converting a *M into a *MessageState, the MessageState object
+// has access to all the information needed to implement protobuf reflection.
+// It has access to the message info as its first field, and a pointer to the
+// MessageState is identical to a pointer to the concrete message value.
+//
+//
+// Requirements:
+// • The type M must implement protoreflect.ProtoMessage.
+// • The address of m must not be nil.
+// • The address of m and the address of m.state must be equal,
+// even though they are different Go types.
+type MessageState struct {
+ pragma.NoUnkeyedLiterals
+ pragma.DoNotCompare
+ pragma.DoNotCopy
+
+ atomicMessageInfo *MessageInfo
+}
+
+type messageState MessageState
+
+var (
+ _ pref.Message = (*messageState)(nil)
+ _ unwrapper = (*messageState)(nil)
+)
+
+// messageDataType is a tuple of a pointer to the message data and
+// a pointer to the message type. It is a generalized way of providing a
+// reflective view over a message instance. The disadvantage of this approach
+// is the need to allocate this tuple of 16B.
+type messageDataType struct {
+ p pointer
+ mi *MessageInfo
+}
+
+type (
+ messageReflectWrapper messageDataType
+ messageIfaceWrapper messageDataType
+)
+
+var (
+ _ pref.Message = (*messageReflectWrapper)(nil)
+ _ unwrapper = (*messageReflectWrapper)(nil)
+ _ pref.ProtoMessage = (*messageIfaceWrapper)(nil)
+ _ unwrapper = (*messageIfaceWrapper)(nil)
+)
+
+// MessageOf returns a reflective view over a message. The input must be a
+// pointer to a named Go struct. If the provided type has a ProtoReflect method,
+// it must be implemented by calling this method.
+func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
+ // TODO: Switch the input to be an opaque Pointer.
+ if reflect.TypeOf(m) != mi.GoReflectType {
+ panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
+ }
+ p := pointerOfIface(m)
+ if p.IsNil() {
+ return mi.nilMessage.Init(mi)
+ }
+ return &messageReflectWrapper{p, mi}
+}
+
+func (m *messageReflectWrapper) pointer() pointer { return m.p }
+func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi }
+
+func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
+ return (*messageReflectWrapper)(m)
+}
+func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+ return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
+}
+
+// checkField verifies that the provided field descriptor is valid.
+// Exactly one of the returned values is populated.
+func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) {
+ var fi *fieldInfo
+ if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
+ fi = mi.denseFields[n]
+ } else {
+ fi = mi.fields[n]
+ }
+ if fi != nil {
+ if fi.fieldDesc != fd {
+ if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want {
+ panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want))
+ }
+ panic(fmt.Sprintf("mismatching field: %v", fd.FullName()))
+ }
+ return fi, nil
+ }
+
+ if fd.IsExtension() {
+ if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want {
+ // TODO: Should this be exact containing message descriptor match?
+ panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want))
+ }
+ if !mi.Desc.ExtensionRanges().Has(fd.Number()) {
+ panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName()))
+ }
+ xtd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return nil, xtd.Type()
+ }
+ panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
new file mode 100644
index 000000000..23124a86e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -0,0 +1,466 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/internal/flags"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type fieldInfo struct {
+ fieldDesc pref.FieldDescriptor
+
+ // These fields are used for protobuf reflection support.
+ has func(pointer) bool
+ clear func(pointer)
+ get func(pointer) pref.Value
+ set func(pointer, pref.Value)
+ mutable func(pointer) pref.Value
+ newMessage func() pref.Message
+ newField func() pref.Value
+}
+
+func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Interface {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft))
+ }
+ if ot.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot))
+ }
+ if !reflect.PtrTo(ot).Implements(ft) {
+ panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft))
+ }
+ conv := NewConverter(ot.Field(0).Type, fd)
+ isMessage := fd.Message() != nil
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ // NOTE: The logic below intentionally assumes that oneof fields are
+ // well-formatted. That is, the oneof interface never contains a
+ // typed nil pointer to one of the wrapper structs.
+
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return false
+ }
+ return true
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot {
+ // NOTE: We intentionally don't check for rv.Elem().IsNil()
+ // so that (*OneofWrapperType)(nil) gets cleared to nil.
+ return
+ }
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return conv.Zero()
+ }
+ rv = rv.Elem().Elem().Field(0)
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ rv.Set(conv.GoValueOf(v))
+ },
+ mutable: func(p pointer) pref.Value {
+ if !isMessage {
+ panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName()))
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ if rv.IsNil() {
+ rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Map {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv)
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(fs.Type))
+ }
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(reflect.PtrTo(ft), fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv.Elem())
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+var (
+ nilBytes = reflect.ValueOf([]byte(nil))
+ emptyBytes = reflect.ValueOf([]byte{})
+)
+
+func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ nullable := fd.HasPresence()
+ isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+ if nullable {
+ if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft))
+ }
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ return !rv.IsNil()
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return rv.Bool()
+ case reflect.Int32, reflect.Int64:
+ return rv.Int() != 0
+ case reflect.Uint32, reflect.Uint64:
+ return rv.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() != 0 || math.Signbit(rv.Float())
+ case reflect.String, reflect.Slice:
+ return rv.Len() > 0
+ default:
+ panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen
+ }
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ if rv.Kind() == reflect.Ptr {
+ rv = rv.Elem()
+ }
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable && rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(ft))
+ }
+ rv = rv.Elem()
+ }
+ rv.Set(conv.GoValueOf(v))
+ if isBytes && rv.Len() == 0 {
+ if nullable {
+ rv.Set(emptyBytes) // preserve presence
+ } else {
+ rv.Set(nilBytes) // do not preserve presence
+ }
+ }
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo {
+ if !flags.ProtoLegacy {
+ panic("no support for proto1 weak fields")
+ }
+
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
+ }
+ })
+ }
+
+ num := fd.Number()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ _, ok := p.Apply(weakOffset).WeakFields().get(num)
+ return ok
+ },
+ clear: func(p pointer) {
+ p.Apply(weakOffset).WeakFields().clear(num)
+ },
+ get: func(p pointer) pref.Value {
+ lazyInit()
+ if p.IsNil() {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ m, ok := p.Apply(weakOffset).WeakFields().get(num)
+ if !ok {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ set: func(p pointer, v pref.Value) {
+ lazyInit()
+ m := v.Message()
+ if m.Descriptor() != messageType.Descriptor() {
+ if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
+ }
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
+ }
+ p.Apply(weakOffset).WeakFields().set(num, m.Interface())
+ },
+ mutable: func(p pointer) pref.Value {
+ lazyInit()
+ fs := p.Apply(weakOffset).WeakFields()
+ m, ok := fs.get(num)
+ if !ok {
+ m = messageType.New().Interface()
+ fs.set(num, m)
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ newMessage: func() pref.Message {
+ lazyInit()
+ return messageType.New()
+ },
+ newField: func() pref.Value {
+ lazyInit()
+ return pref.ValueOfMessage(messageType.New())
+ },
+ }
+}
+
+func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return !rv.IsNil()
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(conv.GoValueOf(v))
+ if rv.IsNil() {
+ panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
+ }
+ },
+ mutable: func(p pointer) pref.Value {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(conv.GoValueOf(conv.New()))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+type oneofInfo struct {
+ oneofDesc pref.OneofDescriptor
+ which func(pointer) pref.FieldNumber
+}
+
+func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() { // valid on either *T or []byte
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ } else {
+ fs := si.oneofsByName[od.Name()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ rv = rv.Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ return si.oneofWrappersByType[rv.Type().Elem()]
+ }
+ }
+ return oi
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
new file mode 100644
index 000000000..741d6e5b6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -0,0 +1,249 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (m *messageState) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageState) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageState) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageState) Interface() protoreflect.ProtoMessage {
+ return m.protoUnwrap().(protoreflect.ProtoMessage)
+}
+func (m *messageState) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageState) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API that allows the v1 code
+// to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageState) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageState) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageState) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageState) IsValid() bool {
+ return !m.pointer().IsNil()
+}
+
+func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageReflectWrapper) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageReflectWrapper) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
+ if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok {
+ return m
+ }
+ return (*messageIfaceWrapper)(m)
+}
+func (m *messageReflectWrapper) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API that allows the v1 code
+// to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageReflectWrapper) IsValid() bool {
+ return !m.pointer().IsNil()
+}
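
Editorial note: the generated methods above back the public protoreflect.Message interface; callers never touch messageState or messageReflectWrapper directly, they go through ProtoReflect(). A minimal sketch of the public surface these methods implement, using the well-known wrappers package from the public protobuf-go API (the snippet is illustrative and not part of this vendored change):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello").ProtoReflect()
	// Range visits only populated fields, as in the generated Range above.
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Println(fd.Name(), "=", v.Interface())
		return true
	})
	fd := m.Descriptor().Fields().ByName("value")
	fmt.Println(m.Has(fd), m.Get(fd).String()) // true hello
}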
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
new file mode 100644
index 000000000..67b4ede67
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -0,0 +1,177 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+const UnsafeEnabled = false
+
+// Pointer is an opaque pointer type.
+type Pointer interface{}
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the field index into a struct.
+type offset struct {
+ index int
+ export exporter
+}
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ if len(f.Index) != 1 {
+ panic("embedded structs are not supported")
+ }
+ if f.PkgPath == "" {
+ return offset{index: f.Index[0]} // field is already exported
+ }
+ if x == nil {
+ panic("exporter must be provided for unexported field")
+ }
+ return offset{index: f.Index[0], export: x}
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f.index >= 0 }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = offset{index: -1}
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset{index: 0}
+
+// pointer is an abstract representation of a pointer to a struct or field.
+type pointer struct{ v reflect.Value }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointerOfIface(p)
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ return pointer{v: reflect.ValueOf(v)}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.v.IsNil()
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The current pointer must be pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if f.export != nil {
+ if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
+ return pointer{v: v}
+ }
+ }
+ return pointer{v: p.v.Elem().Field(f.index).Addr()}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ if got := p.v.Type().Elem(); got != t {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
+ }
+ return p.v
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
+func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
+func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
+func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
+func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
+func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
+func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
+func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
+func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
+func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
+func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
+func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
+func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
+func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
+func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
+func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
+func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
+func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
+func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
+func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
+func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
+func (p pointer) String() *string { return p.v.Interface().(*string) }
+func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
+func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
+func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
+func (p pointer) Extensions() *map[int32]ExtensionField {
+ return p.v.Interface().(*map[int32]ExtensionField)
+}
+
+func (p pointer) Elem() pointer {
+ return pointer{v: p.v.Elem()}
+}
+
+// PointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) PointerSlice() []pointer {
+ // TODO: reconsider this
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ sp := p.v.Elem()
+ sp.Set(reflect.Append(sp, v.v))
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ p.v.Elem().Set(v.v)
+}
+
+func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
+func (ms *messageState) pointer() pointer { panic("not supported") }
+func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
+
+type atomicNilMessage struct {
+ once sync.Once
+ m messageReflectWrapper
+}
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ m.once.Do(func() {
+ m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
+ m.m.mi = mi
+ })
+ return &m.m
+}
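
Editorial note: in this purego build an offset is just the field index and Apply resolves it with package reflect instead of pointer arithmetic. A rough standalone illustration of that idea, with made-up types for the sketch:

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 7, B: "x"}
	// The "offset" of field B is its index within the struct.
	idx := 1
	// Equivalent of pointer.Apply: take the address of the indexed field.
	fb := reflect.ValueOf(m).Elem().Field(idx).Addr()
	*fb.Interface().(*string) = "y"
	fmt.Println(m.B) // y
}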
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
new file mode 100644
index 000000000..088aa85d4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -0,0 +1,173 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const UnsafeEnabled = true
+
+// Pointer is an opaque pointer type.
+type Pointer unsafe.Pointer
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the byte offset to the field from the start of the struct.
+type offset uintptr
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ return offset(f.Offset)
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f != invalidOffset }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = ^offset(0)
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset(0)
+
+// pointer is a pointer to a message struct or field.
+type pointer struct{ p unsafe.Pointer }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointer{p: unsafe.Pointer(p)}
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ type ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+ return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The pointer must be valid and pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if p.IsNil() {
+ panic("invalid nil pointer")
+ }
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ // TODO: Use tricky unsafe magic to directly create ifaceHeader.
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return (*bool)(p.p) }
+func (p pointer) BoolPtr() **bool { return (**bool)(p.p) }
+func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) }
+func (p pointer) Int32() *int32 { return (*int32)(p.p) }
+func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) }
+func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) }
+func (p pointer) Int64() *int64 { return (*int64)(p.p) }
+func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) }
+func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) }
+func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) }
+func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) }
+func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) }
+func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) }
+func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) }
+func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) }
+func (p pointer) Float32() *float32 { return (*float32)(p.p) }
+func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) }
+func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) }
+func (p pointer) Float64() *float64 { return (*float64)(p.p) }
+func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) }
+func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) }
+func (p pointer) String() *string { return (*string)(p.p) }
+func (p pointer) StringPtr() **string { return (**string)(p.p) }
+func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) }
+func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
+func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
+func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+
+func (p pointer) Elem() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// PointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) PointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v)
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
+}
+
+// Static check that MessageState does not exceed the size of a pointer.
+const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
+
+func (Export) MessageStateOf(p Pointer) *messageState {
+ // Super-tricky - see documentation on MessageState.
+ return (*messageState)(unsafe.Pointer(p))
+}
+func (ms *messageState) pointer() pointer {
+ // Super-tricky - see documentation on MessageState.
+ return pointer{p: unsafe.Pointer(ms)}
+}
+func (ms *messageState) messageInfo() *MessageInfo {
+ mi := ms.LoadMessageInfo()
+ if mi == nil {
+ panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct")
+ }
+ return mi
+}
+func (ms *messageState) LoadMessageInfo() *MessageInfo {
+ return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo))))
+}
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi))
+}
+
+type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ if p := atomic.LoadPointer(&m.p); p != nil {
+ return (*messageReflectWrapper)(p)
+ }
+ w := &messageReflectWrapper{mi: mi}
+ atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w))
+ return (*messageReflectWrapper)(atomic.LoadPointer(&m.p))
+}
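
Editorial note: the unsafe variant instead records the byte offset from reflect.StructField.Offset and applies it with raw pointer arithmetic, which is what Apply does above. A hedged, self-contained sketch of the same trick on an ordinary struct (not part of the vendored code):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B int64
}

func main() {
	m := msg{A: 1, B: 2}
	f, _ := reflect.TypeOf(m).FieldByName("B")
	// Equivalent of pointer.Apply: base address plus the field's byte offset.
	pb := (*int64)(unsafe.Pointer(uintptr(unsafe.Pointer(&m)) + f.Offset))
	*pb = 42
	fmt.Println(m.B) // 42
}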
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
new file mode 100644
index 000000000..57de9cc85
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -0,0 +1,575 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+ "reflect"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ValidationStatus is the result of validating the wire-format encoding of a message.
+type ValidationStatus int
+
+const (
+ // ValidationUnknown indicates that unmarshaling the message might succeed or fail.
+ // The validator was unable to render a judgement.
+ //
+ // The only causes of this status are an aberrant message type appearing somewhere
+ // in the message or a failure in the extension resolver.
+ ValidationUnknown ValidationStatus = iota + 1
+
+ // ValidationInvalid indicates that unmarshaling the message will fail.
+ ValidationInvalid
+
+ // ValidationValid indicates that unmarshaling the message will succeed.
+ ValidationValid
+)
+
+func (v ValidationStatus) String() string {
+ switch v {
+ case ValidationUnknown:
+ return "ValidationUnknown"
+ case ValidationInvalid:
+ return "ValidationInvalid"
+ case ValidationValid:
+ return "ValidationValid"
+ default:
+ return fmt.Sprintf("ValidationStatus(%d)", int(v))
+ }
+}
+
+// Validate determines whether the contents of the buffer are a valid wire encoding
+// of the message type.
+//
+// This function is exposed for testing.
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) {
+ mi, ok := mt.(*MessageInfo)
+ if !ok {
+ return out, ValidationUnknown
+ }
+ if in.Resolver == nil {
+ in.Resolver = preg.GlobalTypes
+ }
+ o, st := mi.validate(in.Buf, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ if o.initialized {
+ out.Flags |= piface.UnmarshalInitialized
+ }
+ return out, st
+}
+
+type validationInfo struct {
+ mi *MessageInfo
+ typ validationType
+ keyType, valType validationType
+
+ // For non-required fields, requiredBit is 0.
+ //
+ // For required fields, requiredBit's nth bit is set, where n is a
+ // unique index in the range [0, MessageInfo.numRequiredFields).
+ //
+ // If there are more than 64 required fields, requiredBit is 0.
+ requiredBit uint64
+}
+
+type validationType uint8
+
+const (
+ validationTypeOther validationType = iota
+ validationTypeMessage
+ validationTypeGroup
+ validationTypeMap
+ validationTypeRepeatedVarint
+ validationTypeRepeatedFixed32
+ validationTypeRepeatedFixed64
+ validationTypeVarint
+ validationTypeFixed32
+ validationTypeFixed64
+ validationTypeBytes
+ validationTypeUTF8String
+ validationTypeMessageSetItem
+)
+
+func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ }
+ default:
+ vi = newValidationInfo(fd, ft)
+ }
+ if fd.Cardinality() == pref.Required {
+ // Avoid overflow. The required field check is done with a 64-bit mask, with
+ // any message containing more than 64 required fields always reported as
+ // potentially uninitialized, so it is not important to get a precise count
+ // of the required fields past 64.
+ if mi.numRequiredFields < math.MaxUint8 {
+ mi.numRequiredFields++
+ vi.requiredBit = 1 << (mi.numRequiredFields - 1)
+ }
+ }
+ return vi
+}
+
+func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.IsList():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeRepeatedVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeRepeatedFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeRepeatedFixed64
+ }
+ }
+ case fd.IsMap():
+ vi.typ = validationTypeMap
+ switch fd.MapKey().Kind() {
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.keyType = validationTypeUTF8String
+ }
+ }
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind:
+ vi.valType = validationTypeMessage
+ if ft.Kind() == reflect.Map {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.valType = validationTypeUTF8String
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if !fd.IsWeak() {
+ vi.mi = getMessageInfo(ft)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ vi.mi = getMessageInfo(ft)
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeFixed64
+ case protowire.BytesType:
+ vi.typ = validationTypeBytes
+ }
+ }
+ }
+ return vi
+}
+
+func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) {
+ mi.init()
+ type validationState struct {
+ typ validationType
+ keyType, valType validationType
+ endGroup protowire.Number
+ mi *MessageInfo
+ tail []byte
+ requiredMask uint64
+ }
+
+ // Pre-allocate some slots to avoid repeated slice reallocation.
+ states := make([]validationState, 0, 16)
+ states = append(states, validationState{
+ typ: validationTypeMessage,
+ mi: mi,
+ })
+ if groupTag > 0 {
+ states[0].typ = validationTypeGroup
+ states[0].endGroup = groupTag
+ }
+ initialized := true
+ start := len(b)
+State:
+ for len(states) > 0 {
+ st := &states[len(states)-1]
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, ValidationInvalid
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if st.endGroup == num {
+ goto PopState
+ }
+ return out, ValidationInvalid
+ }
+ var vi validationInfo
+ switch {
+ case st.typ == validationTypeMap:
+ switch num {
+ case 1:
+ vi.typ = st.keyType
+ case 2:
+ vi.typ = st.valType
+ vi.mi = st.mi
+ vi.requiredBit = 1
+ }
+ case flags.ProtoLegacy && st.mi.isMessageSet:
+ switch num {
+ case messageset.FieldItem:
+ vi.typ = validationTypeMessageSetItem
+ }
+ default:
+ var f *coderFieldInfo
+ if int(num) < len(st.mi.denseCoderFields) {
+ f = st.mi.denseCoderFields[num]
+ } else {
+ f = st.mi.coderFields[num]
+ }
+ if f != nil {
+ vi = f.validation
+ if vi.typ == validationTypeMessage && vi.mi == nil {
+ // Probable weak field.
+ //
+ // TODO: Consider storing the results of this lookup somewhere
+ // rather than recomputing it on every validation.
+ fd := st.mi.Desc.Fields().ByNumber(num)
+ if fd == nil || !fd.IsWeak() {
+ break
+ }
+ messageName := fd.Message().FullName()
+ messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ switch err {
+ case nil:
+ vi.mi, _ = messageType.(*MessageInfo)
+ case preg.NotFound:
+ vi.typ = validationTypeBytes
+ default:
+ return out, ValidationUnknown
+ }
+ }
+ break
+ }
+ // Possible extension field.
+ //
+ // TODO: We should return ValidationUnknown when:
+ // 1. The resolver is not frozen. (More extensions may be added to it.)
+ // 2. The resolver returns preg.NotFound.
+ // In this case, a type added to the resolver in the future could cause
+ // unmarshaling to begin failing. Supporting this requires some way to
+ // determine if the resolver is frozen.
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num)
+ if err != nil && err != preg.NotFound {
+ return out, ValidationUnknown
+ }
+ if err == nil {
+ vi = getExtensionFieldInfo(xt).validation
+ }
+ }
+ if vi.requiredBit != 0 {
+ // Check that the field has a compatible wire type.
+ // We only need to consider non-repeated field types,
+ // since repeated fields (and maps) can never be required.
+ ok := false
+ switch vi.typ {
+ case validationTypeVarint:
+ ok = wtyp == protowire.VarintType
+ case validationTypeFixed32:
+ ok = wtyp == protowire.Fixed32Type
+ case validationTypeFixed64:
+ ok = wtyp == protowire.Fixed64Type
+ case validationTypeBytes, validationTypeUTF8String, validationTypeMessage:
+ ok = wtyp == protowire.BytesType
+ case validationTypeGroup:
+ ok = wtyp == protowire.StartGroupType
+ }
+ if ok {
+ st.requiredMask |= vi.requiredBit
+ }
+ }
+
+ switch wtyp {
+ case protowire.VarintType:
+ if len(b) >= 10 {
+ switch {
+ case b[0] < 0x80:
+ b = b[1:]
+ case b[1] < 0x80:
+ b = b[2:]
+ case b[2] < 0x80:
+ b = b[3:]
+ case b[3] < 0x80:
+ b = b[4:]
+ case b[4] < 0x80:
+ b = b[5:]
+ case b[5] < 0x80:
+ b = b[6:]
+ case b[6] < 0x80:
+ b = b[7:]
+ case b[7] < 0x80:
+ b = b[8:]
+ case b[8] < 0x80:
+ b = b[9:]
+ case b[9] < 0x80 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ } else {
+ switch {
+ case len(b) > 0 && b[0] < 0x80:
+ b = b[1:]
+ case len(b) > 1 && b[1] < 0x80:
+ b = b[2:]
+ case len(b) > 2 && b[2] < 0x80:
+ b = b[3:]
+ case len(b) > 3 && b[3] < 0x80:
+ b = b[4:]
+ case len(b) > 4 && b[4] < 0x80:
+ b = b[5:]
+ case len(b) > 5 && b[5] < 0x80:
+ b = b[6:]
+ case len(b) > 6 && b[6] < 0x80:
+ b = b[7:]
+ case len(b) > 7 && b[7] < 0x80:
+ b = b[8:]
+ case len(b) > 8 && b[8] < 0x80:
+ b = b[9:]
+ case len(b) > 9 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ continue State
+ case protowire.BytesType:
+ var size uint64
+ if len(b) >= 1 && b[0] < 0x80 {
+ size = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ size = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ size, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ if size > uint64(len(b)) {
+ return out, ValidationInvalid
+ }
+ v := b[:size]
+ b = b[size:]
+ switch vi.typ {
+ case validationTypeMessage:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ fallthrough
+ case validationTypeMap:
+ if vi.mi != nil {
+ vi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: vi.typ,
+ keyType: vi.keyType,
+ valType: vi.valType,
+ mi: vi.mi,
+ tail: b,
+ })
+ b = v
+ continue State
+ case validationTypeRepeatedVarint:
+ // Packed field.
+ for len(v) > 0 {
+ _, n := protowire.ConsumeVarint(v)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ v = v[n:]
+ }
+ case validationTypeRepeatedFixed32:
+ // Packed field.
+ if len(v)%4 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeRepeatedFixed64:
+ // Packed field.
+ if len(v)%8 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeUTF8String:
+ if !utf8.Valid(v) {
+ return out, ValidationInvalid
+ }
+ }
+ case protowire.Fixed32Type:
+ if len(b) < 4 {
+ return out, ValidationInvalid
+ }
+ b = b[4:]
+ case protowire.Fixed64Type:
+ if len(b) < 8 {
+ return out, ValidationInvalid
+ }
+ b = b[8:]
+ case protowire.StartGroupType:
+ switch {
+ case vi.typ == validationTypeGroup:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ states = append(states, validationState{
+ typ: validationTypeGroup,
+ mi: vi.mi,
+ endGroup: num,
+ })
+ continue State
+ case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
+ typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
+ if err != nil {
+ return out, ValidationInvalid
+ }
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid)
+ switch {
+ case err == preg.NotFound:
+ b = b[n:]
+ case err != nil:
+ return out, ValidationUnknown
+ default:
+ xvi := getExtensionFieldInfo(xt).validation
+ if xvi.mi != nil {
+ xvi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: xvi.typ,
+ mi: xvi.mi,
+ tail: b[n:],
+ })
+ b = v
+ continue State
+ }
+ default:
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ if st.endGroup != 0 {
+ return out, ValidationInvalid
+ }
+ if len(b) != 0 {
+ return out, ValidationInvalid
+ }
+ b = st.tail
+ PopState:
+ numRequiredFields := 0
+ switch st.typ {
+ case validationTypeMessage, validationTypeGroup:
+ numRequiredFields = int(st.mi.numRequiredFields)
+ case validationTypeMap:
+ // If this is a map field with a message value that contains
+ // required fields, require that the value be present.
+ if st.mi != nil && st.mi.numRequiredFields > 0 {
+ numRequiredFields = 1
+ }
+ }
+ // If there are more than 64 required fields, this check will
+ // always fail and we will report that the message is potentially
+ // uninitialized.
+ if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields {
+ initialized = false
+ }
+ states = states[:len(states)-1]
+ }
+ out.n = start - len(b)
+ if initialized {
+ out.initialized = true
+ }
+ return out, ValidationValid
+}
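
Editorial note: the validator above hand-inlines the one- and two-byte varint cases and only falls back to protowire.ConsumeVarint for longer encodings. A small illustrative sketch of what that fast path computes, compared against the public protowire helper:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := []byte{0xAC, 0x02} // varint encoding of 300
	// Two-byte fast path, as in validate(): low 7 bits of b[0] plus b[1]<<7.
	fast := uint64(b[0]&0x7f) + uint64(b[1])<<7
	// General path.
	v, n := protowire.ConsumeVarint(b)
	fmt.Println(fast, v, n) // 300 300 2
}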
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
new file mode 100644
index 000000000..009cbefd1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// weakFields adds methods to the exported WeakFields type for internal use.
+//
+// The exported type is an alias to an unnamed type, so methods can't be
+// defined directly on it.
+type weakFields WeakFields
+
+func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+ m, ok := w[int32(num)]
+ return m, ok
+}
+
+func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
+
+func (w *weakFields) clear(num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+ _, ok := w[int32(num)]
+ return ok
+}
+
+func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+ if m, ok := w[int32(num)]; ok {
+ return m
+ }
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ return mt.Zero().Interface()
+}
+
+func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+ if m != nil {
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ if mt != m.ProtoReflect().Type() {
+ panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
+ }
+ }
+ if m == nil || !m.ProtoReflect().IsValid() {
+ delete(*w, int32(num))
+ return
+ }
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
new file mode 100644
index 000000000..a3de1cf32
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mapsort provides sorted access to maps.
+package mapsort
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Range iterates over every map entry in sorted key order,
+// calling f for each key and value encountered.
+func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ var keys []protoreflect.MapKey
+ mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool {
+ keys = append(keys, key)
+ return true
+ })
+ sort.Slice(keys, func(i, j int) bool {
+ switch keyKind {
+ case protoreflect.BoolKind:
+ return !keys[i].Bool() && keys[j].Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return keys[i].Int() < keys[j].Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind,
+ protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return keys[i].Uint() < keys[j].Uint()
+ case protoreflect.StringKind:
+ return keys[i].String() < keys[j].String()
+ default:
+ panic("invalid kind: " + keyKind.String())
+ }
+ })
+ for _, key := range keys {
+ if !f(key, mapv.Get(key)) {
+ break
+ }
+ }
+}
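
Editorial note: mapsort.Range provides deterministic iteration by collecting the keys, sorting them, and only then visiting entries. The package is internal and not importable, so here is the same pattern sketched on an ordinary Go map (illustrative only):

package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	// Collect and sort the keys, then visit entries in key order.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k, m[k]) // a 1, b 2, c 3
	}
}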
diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
new file mode 100644
index 000000000..49dc4fcd9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pragma provides types that can be embedded into a struct to
+// statically enforce or prevent certain language properties.
+package pragma
+
+import "sync"
+
+// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals.
+type NoUnkeyedLiterals struct{}
+
+// DoNotImplement can be embedded in an interface to prevent trivial
+// implementations of the interface.
+//
+// This is useful to prevent unauthorized implementations of an interface
+// so that it can be extended in the future for any protobuf language changes.
+type DoNotImplement interface{ ProtoInternal(DoNotImplement) }
+
+// DoNotCompare can be embedded in a struct to prevent comparability.
+type DoNotCompare [0]func()
+
+// DoNotCopy can be embedded in a struct to help prevent shallow copies.
+// This does not rely on a Go language feature, but rather a special case
+// within the vet checker.
+//
+// See https://golang.org/issues/8005.
+type DoNotCopy [0]sync.Mutex
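
Editorial note: embedding these zero-size types is a compile-time/vet-time trick: a [0]func() field makes the containing struct non-comparable, and a [0]sync.Mutex field is intended to make go vet's copylocks check flag shallow copies. A hedged sketch of the pattern with local stand-in types:

package main

import "sync"

type noCompare [0]func()
type noCopy [0]sync.Mutex

type handle struct {
	noCompare // handle values cannot be compared with ==
	noCopy    // copying a handle is meant to be reported by `go vet` (copylocks)
	id        int
}

func main() {
	h := handle{id: 1}
	_ = h
	// _ = h == handle{id: 1} // compile error: struct containing [0]func() is not comparable
}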
diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go
new file mode 100644
index 000000000..d3d7f89ab
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/set/ints.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package set provides simple set data structures for uint64s.
+package set
+
+import "math/bits"
+
+// int64s represents a set of integers within the range of 0..63.
+type int64s uint64
+
+func (bs *int64s) Len() int {
+ return bits.OnesCount64(uint64(*bs))
+}
+func (bs *int64s) Has(n uint64) bool {
+ return uint64(*bs)&(uint64(1)<<n) > 0
+}
+func (bs *int64s) Set(n uint64) {
+ *(*uint64)(bs) |= uint64(1) << n
+}
+func (bs *int64s) Clear(n uint64) {
+ *(*uint64)(bs) &^= uint64(1) << n
+}
+
+// Ints represents a set of integers within the range of 0..math.MaxUint64.
+type Ints struct {
+ lo int64s
+ hi map[uint64]struct{}
+}
+
+func (bs *Ints) Len() int {
+ return bs.lo.Len() + len(bs.hi)
+}
+func (bs *Ints) Has(n uint64) bool {
+ if n < 64 {
+ return bs.lo.Has(n)
+ }
+ _, ok := bs.hi[n]
+ return ok
+}
+func (bs *Ints) Set(n uint64) {
+ if n < 64 {
+ bs.lo.Set(n)
+ return
+ }
+ if bs.hi == nil {
+ bs.hi = make(map[uint64]struct{})
+ }
+ bs.hi[n] = struct{}{}
+}
+func (bs *Ints) Clear(n uint64) {
+ if n < 64 {
+ bs.lo.Clear(n)
+ return
+ }
+ delete(bs.hi, n)
+}
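
Editorial note: the low half of Ints is a plain 64-bit bitmask, so Set/Has/Clear are single shift-and-mask operations. A minimal standalone version of that bitset (illustrative, not the vendored API):

package main

import (
	"fmt"
	"math/bits"
)

type bitset64 uint64

func (b *bitset64) Set(n uint)     { *b |= 1 << n }
func (b *bitset64) Clear(n uint)   { *b &^= 1 << n }
func (b bitset64) Has(n uint) bool { return b&(1<<n) != 0 }
func (b bitset64) Len() int        { return bits.OnesCount64(uint64(b)) }

func main() {
	var s bitset64
	s.Set(3)
	s.Set(40)
	s.Clear(3)
	fmt.Println(s.Has(3), s.Has(40), s.Len()) // false true 1
}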
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go
new file mode 100644
index 000000000..0b74e7658
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strs provides string manipulation functionality specific to protobuf.
+package strs
+
+import (
+ "go/token"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// EnforceUTF8 reports whether to enforce strict UTF-8 validation.
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool {
+ if flags.ProtoLegacy {
+ if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok {
+ return fd.EnforceUTF8()
+ }
+ }
+ return fd.Syntax() == protoreflect.Proto3
+}
+
+// GoCamelCase camel-cases a protobuf name for use as a Go identifier.
+//
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+func GoCamelCase(s string) string {
+ // Invariant: if the next letter is lower case, it must be converted
+ // to upper case.
+ // That is, we process a word at a time, where words are marked by _ or
+ // upper case letter. Digits are treated as words.
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '.' in ".{{lowercase}}".
+ case c == '.':
+ b = append(b, '_') // convert '.' to '_'
+ case c == '_' && (i == 0 || s[i-1] == '.'):
+ // Convert initial '_' to ensure we start with a capital letter.
+ // Do the same for '_' after '.' to match historic behavior.
+ b = append(b, 'X') // convert '_' to 'X'
+ case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '_' in "_{{lowercase}}".
+ case isASCIIDigit(c):
+ b = append(b, c)
+ default:
+ // Assume we have a letter now - if not, it's a bogus identifier.
+ // The next word is a sequence of characters that must start upper case.
+ if isASCIILower(c) {
+ c -= 'a' - 'A' // convert lowercase to uppercase
+ }
+ b = append(b, c)
+
+ // Accept lower case sequence that follows.
+ for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ {
+ b = append(b, s[i+1])
+ }
+ }
+ }
+ return string(b)
+}
+
+// GoSanitized converts a string to a valid Go identifier.
+func GoSanitized(s string) string {
+ // Sanitize the input to the set of valid characters,
+ // which must be '_' or be in the Unicode L or N categories.
+ s = strings.Map(func(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ return '_'
+ }, s)
+
+ // Prepend '_' in the event of a Go keyword conflict or if
+ // the identifier is invalid (does not start in the Unicode L category).
+ r, _ := utf8.DecodeRuneInString(s)
+ if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) {
+ return "_" + s
+ }
+ return s
+}
+
+// JSONCamelCase converts a snake_case identifier to a camelCase identifier,
+// according to the protobuf JSON specification.
+func JSONCamelCase(s string) string {
+ var b []byte
+ var wasUnderscore bool
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if c != '_' {
+ if wasUnderscore && isASCIILower(c) {
+ c -= 'a' - 'A' // convert to uppercase
+ }
+ b = append(b, c)
+ }
+ wasUnderscore = c == '_'
+ }
+ return string(b)
+}
+
+// JSONSnakeCase converts a camelCase identifier to a snake_case identifier,
+// according to the protobuf JSON specification.
+func JSONSnakeCase(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if isASCIIUpper(c) {
+ b = append(b, '_')
+ c += 'a' - 'A' // convert to lowercase
+ }
+ b = append(b, c)
+ }
+ return string(b)
+}
+
+// MapEntryName derives the name of the map entry message given the field name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057
+func MapEntryName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(c))
+ }
+ }
+ b = append(b, "Entry"...)
+ return string(b)
+}
+
+// EnumValueName derives the camel-cased enum value name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313
+func EnumValueName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(unicode.ToLower(c)))
+ upperNext = false
+ }
+ }
+ return string(b)
+}
+
+// TrimEnumPrefix trims the enum name prefix from an enum value name,
+// where the prefix is all lowercase without underscores.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375
+func TrimEnumPrefix(s, prefix string) string {
+ s0 := s // original input
+ for len(s) > 0 && len(prefix) > 0 {
+ if s[0] == '_' {
+ s = s[1:]
+ continue
+ }
+ if unicode.ToLower(rune(s[0])) != rune(prefix[0]) {
+ return s0 // no prefix match
+ }
+ s, prefix = s[1:], prefix[1:]
+ }
+ if len(prefix) > 0 {
+ return s0 // no prefix match
+ }
+ s = strings.TrimLeft(s, "_")
+ if len(s) == 0 {
+ return s0 // avoid returning empty string
+ }
+ return s
+}
+
+func isASCIILower(c byte) bool {
+ return 'a' <= c && c <= 'z'
+}
+func isASCIIUpper(c byte) bool {
+ return 'A' <= c && c <= 'Z'
+}
+func isASCIIDigit(c byte) bool {
+ return '0' <= c && c <= '9'
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
new file mode 100644
index 000000000..85e074c97
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package strs
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
+
+type Builder struct{}
+
+func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ return prefix.Append(name)
+}
+
+func (*Builder) MakeString(b []byte) string {
+ return string(b)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
new file mode 100644
index 000000000..2160c7019
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -0,0 +1,94 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package strs
+
+import (
+ "unsafe"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) (s string) {
+ src := (*sliceHeader)(unsafe.Pointer(&b))
+ dst := (*stringHeader)(unsafe.Pointer(&s))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ return s
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat the returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) (b []byte) {
+ src := (*stringHeader)(unsafe.Pointer(&s))
+ dst := (*sliceHeader)(unsafe.Pointer(&b))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ dst.Cap = src.Len
+ return b
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+ buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ n := len(prefix) + len(".") + len(name)
+ if len(prefix) == 0 {
+ n -= len(".")
+ }
+ sb.grow(n)
+ sb.buf = append(sb.buf, prefix...)
+ sb.buf = append(sb.buf, '.')
+ sb.buf = append(sb.buf, name...)
+ return pref.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+ sb.grow(len(b))
+ sb.buf = append(sb.buf, b...)
+ return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+ if cap(sb.buf)-len(sb.buf) >= n {
+ return
+ }
+
+ // Unlike strings.Builder, we do not need to copy over the contents
+ // of the old buffer since our builder provides no API for
+ // retrieving previously created strings.
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+ return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
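A rough sketch of how the Builder is meant to be used within the module (strs is internal, so this is illustrative only): many short strings are carved out of one shared backing buffer rather than being allocated one by one.

    var sb Builder // zero value is ready to use
    full := sb.AppendFullName("google.protobuf", "FileOptions") // "google.protobuf.FileOptions"
    name := sb.MakeString([]byte("deprecated"))
    // Both results alias sb.buf, so they share the builder's allocation lifetime.
    _, _ = full, name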
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
new file mode 100644
index 000000000..4088e59c6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -0,0 +1,79 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version records versioning information about this module.
+package version
+
+import (
+ "fmt"
+ "strings"
+)
+
+// These constants determine the current version of this module.
+//
+//
+// For our release process, we enforce the following rules:
+// * Tagged releases use a tag that is identical to String.
+// * Tagged releases never reference a commit where the String
+// contains "devel".
+// * The set of all commits in this repository where String
+// does not contain "devel" must have a unique String.
+//
+//
+// Steps for tagging a new release:
+// 1. Create a new CL.
+//
+// 2. Update Minor, Patch, and/or PreRelease as necessary.
+// PreRelease must not contain the string "devel".
+//
+// 3. Since the last released minor version, have there been any changes to
+// generator that relies on new functionality in the runtime?
+// If yes, then increment RequiredGenerated.
+//
+// 4. Since the last released minor version, have there been any changes to
+// the runtime that removes support for old .pb.go source code?
+// If yes, then increment SupportMinimum.
+//
+// 5. Send out the CL for review and submit it.
+// Note that the next CL in step 8 must be submitted after this CL
+// without any other CLs in-between.
+//
+// 6. Tag a new version, where the tag is the current String.
+//
+// 7. Write release notes for all notable changes
+// between this release and the last release.
+//
+// 8. Create a new CL.
+//
+// 9. Update PreRelease to include the string "devel".
+// For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
+//
+// 10. Send out the CL for review and submit it.
+const (
+ Major = 1
+ Minor = 23
+ Patch = 0
+ PreRelease = ""
+)
+
+// String formats the version string for this module in semver format.
+//
+// Examples:
+// v1.20.1
+// v1.21.0-rc.1
+func String() string {
+ v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch)
+ if PreRelease != "" {
+ v += "-" + PreRelease
+
+ // TODO: Add metadata about the commit or build hash.
+ // See https://golang.org/issue/29814
+ // See https://golang.org/issue/33533
+ var metadata string
+ if strings.Contains(PreRelease, "devel") && metadata != "" {
+ v += "+" + metadata
+ }
+ }
+ return v
+}
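Worked out from the constants and String above (the version package is internal to the module, so this is a hypothetical trace):

    s := version.String()
    fmt.Println(s) // "v1.23.0" with Major=1, Minor=23, Patch=0, PreRelease=""
    // With PreRelease = "rc.1" the result would be "v1.23.0-rc.1" instead.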
diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go
new file mode 100644
index 000000000..3e9a6a2f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/checkinit.go
@@ -0,0 +1,71 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// CheckInitialized returns an error if any required fields in m are not set.
+func CheckInitialized(m Message) error {
+ // Treat a nil message interface as an "untyped" empty message,
+ // which we assume to have no required fields.
+ if m == nil {
+ return nil
+ }
+
+ return checkInitialized(m.ProtoReflect())
+}
+
+// checkInitialized returns an error if any required fields in m are not set.
+func checkInitialized(m protoreflect.Message) error {
+ if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil {
+ _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{
+ Message: m,
+ })
+ return err
+ }
+ return checkInitializedSlow(m)
+}
+
+func checkInitializedSlow(m protoreflect.Message) error {
+ md := m.Descriptor()
+ fds := md.Fields()
+ for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ {
+ fd := fds.ByNumber(nums.Get(i))
+ if !m.Has(fd) {
+ return errors.RequiredNotSet(string(fd.FullName()))
+ }
+ }
+ var err error
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ if fd.Message() == nil {
+ return true
+ }
+ for i, list := 0, v.List(); i < list.Len() && err == nil; i++ {
+ err = checkInitialized(list.Get(i).Message())
+ }
+ case fd.IsMap():
+ if fd.MapValue().Message() == nil {
+ return true
+ }
+ v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool {
+ err = checkInitialized(v.Message())
+ return err == nil
+ })
+ default:
+ if fd.Message() == nil {
+ return true
+ }
+ err = checkInitialized(v.Message())
+ }
+ return err == nil
+ })
+ return err
+}
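A minimal usage sketch of the public entry point; pb.MyRequest is a hypothetical proto2-generated type with required fields:

    var req pb.MyRequest // hypothetical; required fields left unset

    if err := proto.CheckInitialized(&req); err != nil {
        // The error reports a missing required field by its full name.
        log.Printf("not initialized: %v", err)
    }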
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
new file mode 100644
index 000000000..128214760
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -0,0 +1,270 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// UnmarshalOptions configures the unmarshaler.
+//
+// Example usage:
+// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Merge merges the input into the destination message.
+ // The default behavior is to always reset the message before unmarshaling,
+ // unless Merge is specified.
+ Merge bool
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // If DiscardUnknown is set, unknown fields are ignored.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+func Unmarshal(b []byte, m Message) error {
+ _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
+ _, err := o.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// UnmarshalState parses a wire-format message and places the result in m.
+//
+// This method permits fine-grained control over the unmarshaler.
+// Most users should use Unmarshal instead.
+func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+ return o.unmarshal(in.Buf, in.Message)
+}
+
+func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+ if !o.Merge {
+ Reset(m.Interface()) // TODO
+ }
+ allowPartial := o.AllowPartial
+ o.Merge = true
+ o.AllowPartial = true
+ methods := protoMethods(m)
+ if methods != nil && methods.Unmarshal != nil &&
+ !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) {
+ in := protoiface.UnmarshalInput{
+ Message: m,
+ Buf: b,
+ Resolver: o.Resolver,
+ }
+ if o.DiscardUnknown {
+ in.Flags |= protoiface.UnmarshalDiscardUnknown
+ }
+ out, err = methods.Unmarshal(in)
+ } else {
+ err = o.unmarshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error {
+ _, err := o.unmarshal(b, m)
+ return err
+}
+
+func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
+ md := m.Descriptor()
+ if messageset.IsMessageSet(md) {
+ return unmarshalMessageSet(b, m, o)
+ }
+ fields := md.Fields()
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ num, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return protowire.ParseError(tagLen)
+ }
+ if num > protowire.MaxValidNumber {
+ return errors.New("invalid field number")
+ }
+
+ // Find the field descriptor for this field number.
+ fd := fields.ByNumber(num)
+ if fd == nil && md.ExtensionRanges().Has(num) {
+ extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err != nil && err != protoregistry.NotFound {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ if extType != nil {
+ fd = extType.TypeDescriptor()
+ }
+ }
+ var err error
+ if fd == nil {
+ err = errUnknown
+ } else if flags.ProtoLegacy {
+ if fd.IsWeak() && fd.Message().IsPlaceholder() {
+ err = errUnknown // weak referent is not linked in
+ }
+ }
+
+ // Parse the field value.
+ var valLen int
+ switch {
+ case err != nil:
+ case fd.IsList():
+ valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd)
+ case fd.IsMap():
+ valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd)
+ default:
+ valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd)
+ }
+ if err != nil {
+ if err != errUnknown {
+ return err
+ }
+ valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:])
+ if valLen < 0 {
+ return protowire.ParseError(valLen)
+ }
+ if !o.DiscardUnknown {
+ m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...))
+ }
+ }
+ b = b[tagLen+valLen:]
+ }
+ return nil
+}
+
+func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) {
+ v, n, err := o.unmarshalScalar(b, wtyp, fd)
+ if err != nil {
+ return 0, err
+ }
+ switch fd.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ m2 := m.Mutable(fd).Message()
+ if err := o.unmarshalMessage(v.Bytes(), m2); err != nil {
+ return n, err
+ }
+ default:
+ // Non-message scalars replace the previous value.
+ m.Set(fd, v)
+ }
+ return n, nil
+}
+
+func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ b, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ var (
+ keyField = fd.MapKey()
+ valField = fd.MapValue()
+ key protoreflect.Value
+ val protoreflect.Value
+ haveKey bool
+ haveVal bool
+ )
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ val = mapv.NewValue()
+ }
+ // Map entries are represented as a two-element message with fields
+ // containing the key and value.
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return 0, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err = errUnknown
+ switch num {
+ case 1:
+ key, n, err = o.unmarshalScalar(b, wtyp, keyField)
+ if err != nil {
+ break
+ }
+ haveKey = true
+ case 2:
+ var v protoreflect.Value
+ v, n, err = o.unmarshalScalar(b, wtyp, valField)
+ if err != nil {
+ break
+ }
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil {
+ return 0, err
+ }
+ default:
+ val = v
+ }
+ haveVal = true
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return 0, err
+ }
+ b = b[n:]
+ }
+ // Every map entry should have entries for key and value, but this is not strictly required.
+ if !haveKey {
+ key = keyField.Default()
+ }
+ if !haveVal {
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ default:
+ val = valField.Default()
+ }
+ }
+ mapv.Set(key.MapKey(), val)
+ return n, nil
+}
+
+// errUnknown is used internally to indicate fields which should be added
+// to the unknown field set of a message. It is never returned from an exported
+// function.
+var errUnknown = errors.New("BUG: internal error (unknown)")
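A short sketch of the two unmarshaling paths defined above; pb.Event is a hypothetical generated message and data is a wire-format []byte:

    var ev pb.Event

    // Default behavior: reset ev, then fail if any required field is missing.
    if err := proto.Unmarshal(data, &ev); err != nil {
        return err
    }

    // Lenient variant: drop unknown fields and tolerate missing required fields.
    o := proto.UnmarshalOptions{DiscardUnknown: true, AllowPartial: true}
    if err := o.Unmarshal(data, &ev); err != nil {
        return err
    }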
diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go
new file mode 100644
index 000000000..d6dc904dc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go
@@ -0,0 +1,603 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unmarshalScalar decodes a value of the given kind.
+//
+// Message values are decoded into a []byte which aliases the input data.
+func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil
+ case protoreflect.EnumKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil
+ case protoreflect.Int32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.Int64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.FloatKind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.DoubleKind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ return protoreflect.ValueOfString(string(v)), n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ default:
+ return val, 0, errUnknown
+ }
+}
+
+func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ return n, nil
+ case protoreflect.EnumKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ return n, nil
+ case protoreflect.Int32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ return n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.Int64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ return n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.FloatKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ return n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.DoubleKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ return n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ return n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ return n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ default:
+ return 0, errUnknown
+ }
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
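As a side note, the Sint32Kind and Sint64Kind cases above decode zig-zag varints, which interleave signed values so that small magnitudes of either sign stay small on the wire:

    for _, v := range []uint64{0, 1, 2, 3, 4} {
        fmt.Println(v, "->", protowire.DecodeZigZag(v)) // 0->0, 1->-1, 2->1, 3->-2, 4->2
    }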
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
new file mode 100644
index 000000000..c52d8c4ab
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functions operating on protocol buffer messages.
+//
+// For documentation on protocol buffers in general, see:
+//
+// https://developers.google.com/protocol-buffers
+//
+// For a tutorial on using protocol buffers with Go, see:
+//
+// https://developers.google.com/protocol-buffers/docs/gotutorial
+//
+// For a guide to generated Go protocol buffer code, see:
+//
+// https://developers.google.com/protocol-buffers/docs/reference/go-generated
+//
+//
+// Binary serialization
+//
+// This package contains functions to convert to and from the wire format,
+// an efficient binary serialization of protocol buffers.
+//
+// • Size reports the size of a message in the wire format.
+//
+// • Marshal converts a message to the wire format.
+// The MarshalOptions type provides more control over wire marshaling.
+//
+// • Unmarshal converts a message from the wire format.
+// The UnmarshalOptions type provides more control over wire unmarshaling.
+//
+//
+// Basic message operations
+//
+// • Clone makes a deep copy of a message.
+//
+// • Merge merges the content of a message into another.
+//
+// • Equal compares two messages. For more control over comparisons
+// and detailed reporting of differences, see package
+// "google.golang.org/protobuf/testing/protocmp".
+//
+// • Reset clears the content of a message.
+//
+// • CheckInitialized reports whether all required fields in a message are set.
+//
+//
+// Optional scalar constructors
+//
+// The API for some generated messages represents optional scalar fields
+// as pointers to a value. For example, an optional string field has the
+// Go type *string.
+//
+// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
+// take a value and return a pointer to a new instance of it,
+// to simplify construction of optional field values.
+//
+// Generated enum types usually have an Enum method which performs the
+// same operation.
+//
+// Optional scalar fields are only supported in proto2.
+//
+//
+// Extension accessors
+//
+// • HasExtension, GetExtension, SetExtension, and ClearExtension
+// access extension field values in a protocol buffer message.
+//
+// Extension fields are only supported in proto2.
+//
+//
+// Related packages
+//
+// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
+// and from JSON.
+//
+// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
+// and from the text format.
+//
+// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
+// reflection interface for protocol buffer data types.
+//
+// • Package "google.golang.org/protobuf/testing/protocmp" provides features
+// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
+// package.
+//
+// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
+// message type, suitable for working with messages where the protocol buffer
+// type is only known at runtime.
+//
+// This module contains additional packages for more specialized use cases.
+// Consult the individual package documentation for details.
+package proto
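The "optional scalar constructors" listed above are small helpers returning pointers; a minimal sketch assuming a hypothetical proto2-generated type pb.Profile with pointer-typed optional fields:

    p := &pb.Profile{
        Name:    proto.String("alice"), // *string
        Age:     proto.Int32(30),       // *int32
        Premium: proto.Bool(true),      // *bool
    }
    _ = p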
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
new file mode 100644
index 000000000..456bfda47
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -0,0 +1,343 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/fieldsort"
+ "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// MarshalOptions configures the marshaler.
+//
+// Example usage:
+// b, err := MarshalOptions{Deterministic: true}.Marshal(m)
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // Deterministic controls whether the same message will always be
+ // serialized to the same bytes within the same binary.
+ //
+ // Setting this option guarantees that repeated serialization of
+ // the same message will return the same bytes, and that different
+ // processes of the same binary (which may be executing on different
+ // machines) will serialize equal messages to the same bytes.
+ // It has no effect on the resulting size of the encoded message compared
+ // to a non-deterministic marshal.
+ //
+ // Note that the deterministic serialization is NOT canonical across
+ // languages. It is not guaranteed to remain stable over time. It is
+ // unstable across different builds with schema changes due to unknown
+ // fields. Users who need canonical serialization (e.g., persistent
+ // storage in a canonical form, fingerprinting, etc.) must define
+ // their own canonicalization specification and implement their own
+ // serializer rather than relying on this API.
+ //
+ // If deterministic serialization is requested, map entries will be
+ // sorted by keys in lexicographical order. This is an implementation
+ // detail and subject to change.
+ Deterministic bool
+
+ // UseCachedSize indicates that the result of a previous Size call
+ // may be reused.
+ //
+ // Setting this option asserts that:
+ //
+ // 1. Size has previously been called on this message with identical
+ // options (except for UseCachedSize itself).
+ //
+ // 2. The message and all its submessages have not changed in any
+ // way since the Size call.
+ //
+ // If either of these invariants is violated,
+ // the results are undefined and may include panics or corrupted output.
+ //
+ // Implementations MAY take this option into account to provide
+ // better performance, but there is no guarantee that they will do so.
+ // There is absolutely no guarantee that Size followed by Marshal with
+ // UseCachedSize set will perform equivalently to Marshal alone.
+ UseCachedSize bool
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// Marshal returns the wire-format encoding of m.
+func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := o.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// emptyBytesForMessage returns a nil buffer if and only if m is invalid,
+// otherwise it returns a non-nil empty buffer.
+//
+// This is to assist the edge-case where user-code does the following:
+// m1.OptionalBytes, _ = proto.Marshal(m2)
+// where they expect the proto2 "optional_bytes" field to be populated
+// if and only if m2 is a valid message.
+func emptyBytesForMessage(m Message) []byte {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return nil
+ }
+ return emptyBuf[:]
+}
+
+// MarshalAppend appends the wire-format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to append.
+ if m == nil {
+ return b, nil
+ }
+
+ out, err := o.marshal(b, m.ProtoReflect())
+ return out.Buf, err
+}
+
+// MarshalState returns the wire-format encoding of a message.
+//
+// This method permits fine-grained control over the marshaler.
+// Most users should use Marshal instead.
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
+ return o.marshal(in.Buf, in.Message)
+}
+
+func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
+ allowPartial := o.AllowPartial
+ o.AllowPartial = true
+ if methods := protoMethods(m); methods != nil && methods.Marshal != nil &&
+ !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) {
+ in := protoiface.MarshalInput{
+ Message: m,
+ Buf: b,
+ }
+ if o.Deterministic {
+ in.Flags |= protoiface.MarshalDeterministic
+ }
+ if o.UseCachedSize {
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ if methods.Size != nil {
+ sout := methods.Size(protoiface.SizeInput{
+ Message: m,
+ Flags: in.Flags,
+ })
+ if cap(b) < len(b)+sout.Size {
+ in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size))
+ copy(in.Buf, b)
+ }
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ out, err = methods.Marshal(in)
+ } else {
+ out.Buf, err = o.marshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) {
+ out, err := o.marshal(b, m)
+ return out.Buf, err
+}
+
+// growcap scales up the capacity of a slice.
+//
+// Given a slice with a current capacity of oldcap and a desired
+// capacity of wantcap, growcap returns a new capacity >= wantcap.
+//
+// The algorithm is mostly identical to the one used by append as of Go 1.14.
+func growcap(oldcap, wantcap int) (newcap int) {
+ if wantcap > oldcap*2 {
+ newcap = wantcap
+ } else if oldcap < 1024 {
+ // The Go 1.14 runtime takes this case when len(s) < 1024,
+ // not when cap(s) < 1024. The difference doesn't seem
+ // significant here.
+ newcap = oldcap * 2
+ } else {
+ newcap = oldcap
+ for 0 < newcap && newcap < wantcap {
+ newcap += newcap / 4
+ }
+ if newcap <= 0 {
+ newcap = wantcap
+ }
+ }
+ return newcap
+}
+
+func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return marshalMessageSet(b, m, o)
+ }
+ // There are many choices for what order we visit fields in. The default one here
+ // is chosen for reasonable efficiency and simplicity given the protoreflect API.
+ // It is not deterministic, since Message.Range does not return fields in any
+ // defined order.
+ //
+ // When using deterministic serialization, we sort the known fields.
+ var err error
+ o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = o.marshalField(b, fd, v)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ b = append(b, m.GetUnknown()...)
+ return b, nil
+}
+
+// rangeFields visits fields in a defined order when deterministic serialization is enabled.
+func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ if !o.Deterministic {
+ m.Range(f)
+ return
+ }
+ var fds []protoreflect.FieldDescriptor
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ fds = append(fds, fd)
+ return true
+ })
+ sort.Slice(fds, func(a, b int) bool {
+ return fieldsort.Less(fds[a], fds[b])
+ })
+ for _, fd := range fds {
+ if !f(fd, m.Get(fd)) {
+ break
+ }
+ }
+}
+
+func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
+ switch {
+ case fd.IsList():
+ return o.marshalList(b, fd, value.List())
+ case fd.IsMap():
+ return o.marshalMap(b, fd, value.Map())
+ default:
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()])
+ return o.marshalSingular(b, fd, value)
+ }
+}
+
+func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) {
+ if fd.IsPacked() && list.Len() > 0 {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ b, pos := appendSpeculativeLength(b)
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ b = finishSpeculativeLength(b, pos)
+ return b, nil
+ }
+
+ kind := fd.Kind()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[kind])
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) {
+ keyf := fd.MapKey()
+ valf := fd.MapValue()
+ var err error
+ o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ var pos int
+ b, pos = appendSpeculativeLength(b)
+
+ b, err = o.marshalField(b, keyf, key.Value())
+ if err != nil {
+ return false
+ }
+ b, err = o.marshalField(b, valf, value)
+ if err != nil {
+ return false
+ }
+ b = finishSpeculativeLength(b, pos)
+ return true
+ })
+ return b, err
+}
+
+func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ if !o.Deterministic {
+ mapv.Range(f)
+ return
+ }
+ mapsort.Range(mapv, kind, f)
+}
+
+// When encoding length-prefixed fields, we speculatively set aside some number of bytes
+// for the length, encode the data, and then encode the length (shifting the data if necessary
+// to make room).
+const speculativeLength = 1
+
+func appendSpeculativeLength(b []byte) ([]byte, int) {
+ pos := len(b)
+ b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...)
+ return b, pos
+}
+
+func finishSpeculativeLength(b []byte, pos int) []byte {
+ mlen := len(b) - pos - speculativeLength
+ msiz := protowire.SizeVarint(uint64(mlen))
+ if msiz != speculativeLength {
+ for i := 0; i < msiz-speculativeLength; i++ {
+ b = append(b, 0)
+ }
+ copy(b[pos+msiz:], b[pos+speculativeLength:])
+ b = b[:pos+msiz+mlen]
+ }
+ protowire.AppendVarint(b[:pos], uint64(mlen))
+ return b
+}
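A brief usage sketch of MarshalOptions; pb.Event is again a hypothetical generated type:

    m := &pb.Event{Id: proto.Uint64(42)} // hypothetical message

    // Deterministic marshaling sorts known fields and map entries.
    b, err := proto.MarshalOptions{Deterministic: true}.Marshal(m)
    if err != nil {
        return err
    }

    // MarshalAppend reuses an existing buffer instead of allocating a new one.
    buf := make([]byte, 0, 256)
    if buf, err = proto.MarshalOptions{}.MarshalAppend(buf, m); err != nil {
        return err
    }
    _, _ = b, buf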
diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go
new file mode 100644
index 000000000..185dacfb4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
+
+func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ b = protowire.AppendVarint(b, v.Uint())
+ case protoreflect.Sfixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ case protoreflect.Fixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ case protoreflect.FloatKind:
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ case protoreflect.Sfixed64Kind:
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ case protoreflect.Fixed64Kind:
+ b = protowire.AppendFixed64(b, v.Uint())
+ case protoreflect.DoubleKind:
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ case protoreflect.StringKind:
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) {
+ return b, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ b = protowire.AppendString(b, v.String())
+ case protoreflect.BytesKind:
+ b = protowire.AppendBytes(b, v.Bytes())
+ case protoreflect.MessageKind:
+ var pos int
+ var err error
+ b, pos = appendSpeculativeLength(b)
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = finishSpeculativeLength(b, pos)
+ case protoreflect.GroupKind:
+ var err error
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType))
+ default:
+ return b, errors.New("invalid kind %v", fd.Kind())
+ }
+ return b, nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
new file mode 100644
index 000000000..10902bd85
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they belong to the same message descriptor,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values. If either of the top-level
+// messages are invalid, then Equal reports true only if both are invalid.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ mx := x.ProtoReflect()
+ my := y.ProtoReflect()
+ if mx.IsValid() != my.IsValid() {
+ return false
+ }
+ return equalMessage(mx, my)
+}
+
+// equalMessage compares two messages.
+func equalMessage(mx, my pref.Message) bool {
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ nx := 0
+ equal := true
+ mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ nx++
+ vy := my.Get(fd)
+ equal = my.Has(fd) && equalField(fd, vx, vy)
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ ny := 0
+ my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ ny++
+ return true
+ })
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+// equalField compares two fields.
+func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch {
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+// equalMap compares two maps.
+func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k pref.MapKey, vx pref.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+// equalList compares two lists.
+func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalValue compares two singular values.
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch {
+ case fd.Message() != nil:
+ return equalMessage(x.Message(), y.Message())
+ case fd.Kind() == pref.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind:
+ fx := x.Float()
+ fy := y.Float()
+ if math.IsNaN(fx) || math.IsNaN(fy) {
+ return math.IsNaN(fx) && math.IsNaN(fy)
+ }
+ return fx == fy
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+func equalUnknown(x, y pref.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[pref.FieldNumber]pref.RawFields)
+ my := make(map[pref.FieldNumber]pref.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
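A short sketch of Equal in use, including the NaN behavior called out in the doc comment; pb.Point is a hypothetical generated type with an optional double field X:

    a := &pb.Point{X: proto.Float64(math.NaN())}
    b := &pb.Point{X: proto.Float64(math.NaN())}
    fmt.Println(proto.Equal(a, b))           // true: NaN is treated as equal to NaN here
    fmt.Println(proto.Equal(a, &pb.Point{})) // false: only one side has X populated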
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
new file mode 100644
index 000000000..5f293cda8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// HasExtension reports whether an extension field is populated.
+// It returns false if m is invalid or if xt does not extend m.
+func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
+ // Treat nil message interface as an empty message; no populated fields.
+ if m == nil {
+ return false
+ }
+
+ // As a special case, we report invalid or mismatching descriptors
+ // as always not being populated (since they aren't).
+ if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ return false
+ }
+
+ return m.ProtoReflect().Has(xt.TypeDescriptor())
+}
+
+// ClearExtension clears an extension field such that subsequent
+// HasExtension calls return false.
+// It panics if m is invalid or if xt does not extend m.
+func ClearExtension(m Message, xt protoreflect.ExtensionType) {
+ m.ProtoReflect().Clear(xt.TypeDescriptor())
+}
+
+// GetExtension retrieves the value for an extension field.
+// If the field is unpopulated, it returns the default value for
+// scalars and an immutable, empty value for lists or messages.
+// It panics if xt does not extend m.
+func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+ // Treat nil message interface as an empty message; return the default.
+ if m == nil {
+ return xt.InterfaceOf(xt.Zero())
+ }
+
+ return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor()))
+}
+
+// SetExtension stores the value of an extension field.
+// It panics if m is invalid, xt does not extend m, or if the type of v
+// is invalid for the specified extension field.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+ xd := xt.TypeDescriptor()
+ pv := xt.ValueOf(v)
+
+ // Specially treat an invalid list, map, or message as clear.
+ isValid := true
+ switch {
+ case xd.IsList():
+ isValid = pv.List().IsValid()
+ case xd.IsMap():
+ isValid = pv.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = pv.Message().IsValid()
+ }
+ if !isValid {
+ m.ProtoReflect().Clear(xd)
+ return
+ }
+
+ m.ProtoReflect().Set(xd, pv)
+}
+
+// RangeExtensions iterates over every populated extension field in m in an
+// undefined order, calling f for each extension type and value encountered.
+// It returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current extension field.
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+ // Treat nil message interface as an empty message; nothing to range over.
+ if m == nil {
+ return
+ }
+
+ m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor).Type()
+ vi := xt.InterfaceOf(v)
+ return f(xt, vi)
+ }
+ return true
+ })
+}
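The accessors above compose as follows; pb.E_Priority is a hypothetical generated int32 extension of a hypothetical extendable message pb.Base:

    msg := &pb.Base{}

    proto.SetExtension(msg, pb.E_Priority, int32(7))
    if proto.HasExtension(msg, pb.E_Priority) {
        v := proto.GetExtension(msg, pb.E_Priority).(int32)
        _ = v // 7
    }
    proto.ClearExtension(msg, pb.E_Priority) // HasExtension now reports false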
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
new file mode 100644
index 000000000..d761ab331
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -0,0 +1,139 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Merge merges src into dst, which must be a message with the same descriptor.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list field in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+//
+// It is semantically equivalent to unmarshaling the encoded form of src
+// into dst with the UnmarshalOptions.Merge option specified.
+func Merge(dst, src Message) {
+ // TODO: Should a nil src be treated as semantically equivalent to an
+ // untyped, read-only, empty message? What about a nil dst?
+
+ dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
+ if dstMsg.Descriptor() != srcMsg.Descriptor() {
+ if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want))
+ }
+ panic("descriptor mismatch")
+ }
+ mergeOptions{}.mergeMessage(dstMsg, srcMsg)
+}
+
+// Clone returns a deep copy of m.
+// If the top-level message is invalid, it returns an invalid message as well.
+func Clone(m Message) Message {
+ // NOTE: Most usages of Clone assume the following properties:
+ // t := reflect.TypeOf(m)
+ // t == reflect.TypeOf(m.ProtoReflect().New().Interface())
+ // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface())
+ //
+ // Embedding protobuf messages breaks this since the parent type will have
+ // a forwarded ProtoReflect method, but the Interface method will return
+ // the underlying embedded message type.
+ if m == nil {
+ return nil
+ }
+ src := m.ProtoReflect()
+ if !src.IsValid() {
+ return src.Type().Zero().Interface()
+ }
+ dst := src.New()
+ mergeOptions{}.mergeMessage(dst, src)
+ return dst.Interface()
+}
+
+// mergeOptions provides a namespace for merge functions, and can be
+// exported in the future if we add user-visible merge options.
+type mergeOptions struct{}
+
+func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) {
+ methods := protoMethods(dst)
+ if methods != nil && methods.Merge != nil {
+ in := protoiface.MergeInput{
+ Destination: dst,
+ Source: src,
+ }
+ out := methods.Merge(in)
+ if out.Flags&protoiface.MergeComplete != 0 {
+ return
+ }
+ }
+
+ if !dst.IsValid() {
+ panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName()))
+ }
+
+ src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ o.mergeList(dst.Mutable(fd).List(), v.List(), fd)
+ case fd.IsMap():
+ o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue())
+ case fd.Message() != nil:
+ o.mergeMessage(dst.Mutable(fd).Message(), v.Message())
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(fd, o.cloneBytes(v))
+ default:
+ dst.Set(fd, v)
+ }
+ return true
+ })
+
+ if len(src.GetUnknown()) > 0 {
+ dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...))
+ }
+}
+
+func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) {
+ // Merge semantics appends to the end of the existing list.
+ for i, n := 0, src.Len(); i < n; i++ {
+ switch v := src.Get(i); {
+ case fd.Message() != nil:
+ dstv := dst.NewElement()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Append(dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Append(o.cloneBytes(v))
+ default:
+ dst.Append(v)
+ }
+ }
+}
+
+func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) {
+ // Merge semantics replaces existing entries, rather than merging into them.
+ src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ switch {
+ case fd.Message() != nil:
+ dstv := dst.NewValue()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Set(k, dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(k, o.cloneBytes(v))
+ default:
+ dst.Set(k, v)
+ }
+ return true
+ })
+}
+
+func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value {
+ return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...))
+}
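
A minimal sketch of the Merge and Clone semantics defined above, assuming a hypothetical proto3 message examplepb.Profile with a string field name and a repeated string field tags:

    dst := &examplepb.Profile{Name: "old", Tags: []string{"a"}}
    src := &examplepb.Profile{Name: "new", Tags: []string{"b"}}

    proto.Merge(dst, src)
    // dst.Name is now "new"; dst.Tags is ["a", "b"] because list elements
    // from src are appended to dst rather than replacing it.

    cp := proto.Clone(src).(*examplepb.Profile)
    cp.Tags[0] = "c" // does not affect src: Clone produced a deep copy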
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
new file mode 100644
index 000000000..b6b3de591
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -0,0 +1,88 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+func sizeMessageSet(m protoreflect.Message) (size int) {
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += messageset.SizeField(fd.Number())
+ size += protowire.SizeTag(messageset.FieldMessage)
+ size += protowire.SizeBytes(sizeMessage(v.Message()))
+ return true
+ })
+ size += messageset.SizeUnknown(m.GetUnknown())
+ return size
+}
+
+func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+ var err error
+ o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = marshalMessageSetField(b, fd, v, o)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ return messageset.AppendUnknown(b, m.GetUnknown())
+}
+
+func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) {
+ b = messageset.AppendFieldStart(b, fd.Number())
+ b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
+ b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
+ b, err := o.marshalMessage(b, value.Message())
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error {
+ if !flags.ProtoLegacy {
+ return errors.New("no support for message_set_wire_format")
+ }
+ return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
+ err := unmarshalMessageSetField(m, num, v, o)
+ if err == errUnknown {
+ unknown := m.GetUnknown()
+ unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
+ unknown = protowire.AppendBytes(unknown, v)
+ m.SetUnknown(unknown)
+ return nil
+ }
+ return err
+ })
+}
+
+func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error {
+ md := m.Descriptor()
+ if !md.ExtensionRanges().Has(num) {
+ return errUnknown
+ }
+ xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err == protoregistry.NotFound {
+ return errUnknown
+ }
+ if err != nil {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ xd := xt.TypeDescriptor()
+ if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
new file mode 100644
index 000000000..ca14b09c3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Message is the top-level interface that all messages must implement.
+// It provides access to a reflective view of a message.
+// Any implementation of this interface may be used with all functions in the
+// protobuf module that accept a Message, except where otherwise specified.
+//
+// This is the v2 interface definition for protobuf messages.
+// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
+//
+// To convert a v1 message to a v2 message,
+// use "github.com/golang/protobuf/proto".MessageV2.
+// To convert a v2 message to a v1 message,
+// use "github.com/golang/protobuf/proto".MessageV1.
+type Message = protoreflect.ProtoMessage
+
+// Error matches all errors produced by packages in the protobuf module.
+//
+// That is, errors.Is(err, Error) reports whether an error is produced
+// by this module.
+var Error error
+
+func init() {
+ Error = errors.Error
+}
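
A minimal sketch of matching module errors against the sentinel above; the input bytes are deliberately malformed, and examplepb.Profile is a hypothetical generated message:

    var m examplepb.Profile
    if err := proto.Unmarshal([]byte{0xff}, &m); errors.Is(err, proto.Error) {
        // err was produced by a package in the protobuf module.
    }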
diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go
new file mode 100644
index 000000000..d8dd604f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build !protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = true
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return m.ProtoMethods()
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
new file mode 100644
index 000000000..b103d4320
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = false
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go
new file mode 100644
index 000000000..3d7f89436
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/reset.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Reset clears every field in the message.
+// The resulting message shares no observable memory with its previous state
+// other than the memory for the message itself.
+func Reset(m Message) {
+ if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods {
+ mr.Reset()
+ return
+ }
+ resetMessage(m.ProtoReflect())
+}
+
+func resetMessage(m protoreflect.Message) {
+ if !m.IsValid() {
+ panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName()))
+ }
+
+ // Clear all known fields.
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ m.Clear(fds.Get(i))
+ }
+
+ // Clear extension fields.
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ m.Clear(fd)
+ return true
+ })
+
+ // Clear unknown fields.
+ m.SetUnknown(nil)
+}
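
A minimal sketch of Reset, assuming the hypothetical generated message examplepb.Profile from the earlier sketches:

    m := &examplepb.Profile{Name: "x", Tags: []string{"a"}}
    proto.Reset(m)
    fmt.Println(proto.Equal(m, &examplepb.Profile{})) // true: every field was cleared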
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
new file mode 100644
index 000000000..11ba84146
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ return MarshalOptions{}.Size(m)
+}
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func (o MarshalOptions) Size(m Message) int {
+ // Treat a nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return 0
+ }
+
+ return sizeMessage(m.ProtoReflect())
+}
+
+func sizeMessage(m protoreflect.Message) (size int) {
+ methods := protoMethods(m)
+ if methods != nil && methods.Size != nil {
+ out := methods.Size(protoiface.SizeInput{
+ Message: m,
+ })
+ return out.Size
+ }
+ if methods != nil && methods.Marshal != nil {
+ // This is not efficient, but we don't have any choice.
+ // This case is mainly used for legacy types with a Marshal method.
+ out, _ := methods.Marshal(protoiface.MarshalInput{
+ Message: m,
+ })
+ return len(out.Buf)
+ }
+ return sizeMessageSlow(m)
+}
+
+func sizeMessageSlow(m protoreflect.Message) (size int) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return sizeMessageSet(m)
+ }
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += sizeField(fd, v)
+ return true
+ })
+ size += len(m.GetUnknown())
+ return size
+}
+
+func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
+ num := fd.Number()
+ switch {
+ case fd.IsList():
+ return sizeList(num, fd, value.List())
+ case fd.IsMap():
+ return sizeMap(num, fd, value.Map())
+ default:
+ return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value)
+ }
+}
+
+func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
+ if fd.IsPacked() && list.Len() > 0 {
+ content := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ content += sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return protowire.SizeTag(num) + protowire.SizeBytes(content)
+ }
+
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return size
+}
+
+func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
+ mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ size += protowire.SizeTag(num)
+ size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value))
+ return true
+ })
+ return size
+}
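
A minimal sketch that uses Size to pre-allocate the output buffer for MarshalOptions.MarshalAppend; examplepb.Profile is hypothetical:

    m := &examplepb.Profile{Name: "x"}
    buf := make([]byte, 0, proto.Size(m))
    buf, err := proto.MarshalOptions{}.MarshalAppend(buf, m)
    if err != nil {
        return err
    }
    // len(buf) now equals proto.Size(m) and no reallocation was needed.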
diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go
new file mode 100644
index 000000000..1118460f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size_gen.go
@@ -0,0 +1,55 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
+ switch kind {
+ case protoreflect.BoolKind:
+ return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ return protowire.SizeVarint(uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ return protowire.SizeVarint(uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ return protowire.SizeVarint(uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ return protowire.SizeVarint(uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ return protowire.SizeVarint(v.Uint())
+ case protoreflect.Sfixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.Fixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.FloatKind:
+ return protowire.SizeFixed32()
+ case protoreflect.Sfixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.Fixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.DoubleKind:
+ return protowire.SizeFixed64()
+ case protoreflect.StringKind:
+ return protowire.SizeBytes(len(v.String()))
+ case protoreflect.BytesKind:
+ return protowire.SizeBytes(len(v.Bytes()))
+ case protoreflect.MessageKind:
+ return protowire.SizeBytes(sizeMessage(v.Message()))
+ case protoreflect.GroupKind:
+ return protowire.SizeGroup(num, sizeMessage(v.Message()))
+ default:
+ return 0
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go
new file mode 100644
index 000000000..653b12c3a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrappers.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
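
A minimal sketch of the wrapper helpers, which are mostly used to populate pointer-typed (proto2 optional) fields of generated structs; examplepb.Settings and its fields are hypothetical:

    s := &examplepb.Settings{
        Enabled: proto.Bool(true),
        Retries: proto.Int32(3),
        Label:   proto.String("default"),
    }
    _ = s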
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
new file mode 100644
index 000000000..6be5d16e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+// The following types are used by the fast-path Message.ProtoMethods method.
+//
+// To avoid polluting the public protoreflect API with types used only by
+// low-level implementations, the canonical definitions of these types are
+// in the runtime/protoiface package. The definitions here and in protoiface
+// must be kept in sync.
+type (
+ methods = struct {
+ pragma.NoUnkeyedLiterals
+ Flags supportFlags
+ Size func(sizeInput) sizeOutput
+ Marshal func(marshalInput) (marshalOutput, error)
+ Unmarshal func(unmarshalInput) (unmarshalOutput, error)
+ Merge func(mergeInput) mergeOutput
+ CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ }
+ supportFlags = uint64
+ sizeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Flags uint8
+ }
+ sizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Size int
+ }
+ marshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ }
+ marshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Buf []byte
+ }
+ unmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ Resolver interface {
+ FindExtensionByName(field FullName) (ExtensionType, error)
+ FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error)
+ }
+ }
+ unmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ mergeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Source Message
+ Destination Message
+ }
+ mergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ checkInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ }
+ checkInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+ }
+)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
new file mode 100644
index 000000000..b669a4e76
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -0,0 +1,478 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoreflect provides interfaces to dynamically manipulate messages.
+//
+// This package includes type descriptors which describe the structure of types
+// defined in proto source files and value interfaces which provide the
+// ability to examine and manipulate the contents of messages.
+//
+//
+// Protocol Buffer Descriptors
+//
+// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
+// are immutable objects that represent protobuf type information.
+// They are wrappers around the messages declared in descriptor.proto.
+// Protobuf descriptors alone lack any information regarding Go types.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Descriptor and ProtoReflect.Descriptor accessors respectively
+// return the protobuf descriptor for the values.
+//
+// The protobuf descriptor interfaces are not meant to be implemented by
+// user code since they might need to be extended in the future to support
+// additions to the protobuf language.
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// google.protobuf.DescriptorProto messages and protobuf descriptors.
+//
+//
+// Go Type Descriptors
+//
+// A type descriptor (e.g., EnumType or MessageType) is a constructor for
+// a concrete Go type that represents the associated protobuf descriptor.
+// There is commonly a one-to-one relationship between protobuf descriptors and
+// Go type descriptors, but it can potentially be a one-to-many relationship.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Type and ProtoReflect.Type accessors respectively
+// return the Go type descriptor for the values.
+//
+// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
+// create Go type descriptors from protobuf descriptors.
+//
+//
+// Value Interfaces
+//
+// The Enum and Message interfaces provide a reflective view over an
+// enum or message instance. For enums, it provides the ability to retrieve
+// the enum value number for any concrete enum type. For messages, it provides
+// the ability to access or manipulate fields of the message.
+//
+// To convert a proto.Message to a protoreflect.Message, use the
+// former's ProtoReflect method. Since the ProtoReflect method is new to the
+// v2 message interface, it may not be present on older message implementations.
+// The "github.com/golang/protobuf/proto".MessageReflect function can be used
+// to obtain a reflective view on older messages.
+//
+//
+// Relationships
+//
+// The following diagrams demonstrate the relationships between
+// various types declared in this package.
+//
+//
+// ┌───────────────────────────────────┐
+// V │
+// ┌────────────── New(n) ─────────────┐ │
+// │ │ │
+// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │
+// │ │ V V │ V │
+// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗
+// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║
+// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝
+// Λ Λ │ │
+// │ └─── Descriptor() ──┘ │
+// │ │
+// └────────────────── Type() ───────┘
+//
+// • An EnumType describes a concrete Go enum type.
+// It has an EnumDescriptor and can construct an Enum instance.
+//
+// • An EnumDescriptor describes an abstract protobuf enum type.
+//
+// • An Enum is a concrete enum instance. Generated enums implement Enum.
+//
+//
+// ┌──────────────── New() ─────────────────┐
+// │ │
+// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐
+// │ │ V V │ V
+// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗
+// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║
+// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝
+// Λ Λ │ │ Λ │
+// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘
+// │ │
+// └─────────────────── Type() ─────────┘
+//
+// • A MessageType describes a concrete Go message type.
+// It has a MessageDescriptor and can construct a Message instance.
+//
+// • A MessageDescriptor describes an abstract protobuf message type.
+//
+// • A Message is a concrete message instance. Generated messages implement
+// ProtoMessage, which can convert to/from a Message.
+//
+//
+// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
+// │ V │ V
+// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗
+// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║
+// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝
+// Λ │ │ Λ │ Λ
+// └─────── Type() ───────┘ │ └─── may implement ────┘ │
+// │ │
+// └────── implements ────────┘
+//
+// • An ExtensionType describes a concrete Go implementation of an extension.
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
+//
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
+//
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
+package protoreflect
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type doNotImplement pragma.DoNotImplement
+
+// ProtoMessage is the top-level interface that all proto messages implement.
+// This is declared in the protoreflect package to avoid a cyclic dependency;
+// use the proto.Message type instead, which aliases this type.
+type ProtoMessage interface{ ProtoReflect() Message }
+
+// Syntax is the language version of the proto file.
+type Syntax syntax
+
+type syntax int8 // keep exact type opaque as the int type may change
+
+const (
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
+)
+
+// IsValid reports whether the syntax is valid.
+func (s Syntax) IsValid() bool {
+ switch s {
+ case Proto2, Proto3:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns s as a proto source identifier (e.g., "proto2").
+func (s Syntax) String() string {
+ switch s {
+ case Proto2:
+ return "proto2"
+ case Proto3:
+ return "proto3"
+ default:
+ return fmt.Sprintf("<unknown:%d>", s)
+ }
+}
+
+// GoString returns s as a Go source identifier (e.g., "Proto2").
+func (s Syntax) GoString() string {
+ switch s {
+ case Proto2:
+ return "Proto2"
+ case Proto3:
+ return "Proto3"
+ default:
+ return fmt.Sprintf("Syntax(%d)", s)
+ }
+}
+
+// Cardinality determines whether a field is optional, required, or repeated.
+type Cardinality cardinality
+
+type cardinality int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Cardinality enumeration.
+const (
+ Optional Cardinality = 1 // appears zero or one times
+ Required Cardinality = 2 // appears exactly one time; invalid with Proto3
+ Repeated Cardinality = 3 // appears zero or more times
+)
+
+// IsValid reports whether the cardinality is valid.
+func (c Cardinality) IsValid() bool {
+ switch c {
+ case Optional, Required, Repeated:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns c as a proto source identifier (e.g., "optional").
+func (c Cardinality) String() string {
+ switch c {
+ case Optional:
+ return "optional"
+ case Required:
+ return "required"
+ case Repeated:
+ return "repeated"
+ default:
+ return fmt.Sprintf("<unknown:%d>", c)
+ }
+}
+
+// GoString returns c as a Go source identifier (e.g., "Optional").
+func (c Cardinality) GoString() string {
+ switch c {
+ case Optional:
+ return "Optional"
+ case Required:
+ return "Required"
+ case Repeated:
+ return "Repeated"
+ default:
+ return fmt.Sprintf("Cardinality(%d)", c)
+ }
+}
+
+// Kind indicates the basic proto kind of a field.
+type Kind kind
+
+type kind int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Field.Kind enumeration.
+const (
+ BoolKind Kind = 8
+ EnumKind Kind = 14
+ Int32Kind Kind = 5
+ Sint32Kind Kind = 17
+ Uint32Kind Kind = 13
+ Int64Kind Kind = 3
+ Sint64Kind Kind = 18
+ Uint64Kind Kind = 4
+ Sfixed32Kind Kind = 15
+ Fixed32Kind Kind = 7
+ FloatKind Kind = 2
+ Sfixed64Kind Kind = 16
+ Fixed64Kind Kind = 6
+ DoubleKind Kind = 1
+ StringKind Kind = 9
+ BytesKind Kind = 12
+ MessageKind Kind = 11
+ GroupKind Kind = 10
+)
+
+// IsValid reports whether the kind is valid.
+func (k Kind) IsValid() bool {
+ switch k {
+ case BoolKind, EnumKind,
+ Int32Kind, Sint32Kind, Uint32Kind,
+ Int64Kind, Sint64Kind, Uint64Kind,
+ Sfixed32Kind, Fixed32Kind, FloatKind,
+ Sfixed64Kind, Fixed64Kind, DoubleKind,
+ StringKind, BytesKind, MessageKind, GroupKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string {
+ switch k {
+ case BoolKind:
+ return "bool"
+ case EnumKind:
+ return "enum"
+ case Int32Kind:
+ return "int32"
+ case Sint32Kind:
+ return "sint32"
+ case Uint32Kind:
+ return "uint32"
+ case Int64Kind:
+ return "int64"
+ case Sint64Kind:
+ return "sint64"
+ case Uint64Kind:
+ return "uint64"
+ case Sfixed32Kind:
+ return "sfixed32"
+ case Fixed32Kind:
+ return "fixed32"
+ case FloatKind:
+ return "float"
+ case Sfixed64Kind:
+ return "sfixed64"
+ case Fixed64Kind:
+ return "fixed64"
+ case DoubleKind:
+ return "double"
+ case StringKind:
+ return "string"
+ case BytesKind:
+ return "bytes"
+ case MessageKind:
+ return "message"
+ case GroupKind:
+ return "group"
+ default:
+ return fmt.Sprintf("<unknown:%d>", k)
+ }
+}
+
+// GoString returns k as a Go source identifier (e.g., "BoolKind").
+func (k Kind) GoString() string {
+ switch k {
+ case BoolKind:
+ return "BoolKind"
+ case EnumKind:
+ return "EnumKind"
+ case Int32Kind:
+ return "Int32Kind"
+ case Sint32Kind:
+ return "Sint32Kind"
+ case Uint32Kind:
+ return "Uint32Kind"
+ case Int64Kind:
+ return "Int64Kind"
+ case Sint64Kind:
+ return "Sint64Kind"
+ case Uint64Kind:
+ return "Uint64Kind"
+ case Sfixed32Kind:
+ return "Sfixed32Kind"
+ case Fixed32Kind:
+ return "Fixed32Kind"
+ case FloatKind:
+ return "FloatKind"
+ case Sfixed64Kind:
+ return "Sfixed64Kind"
+ case Fixed64Kind:
+ return "Fixed64Kind"
+ case DoubleKind:
+ return "DoubleKind"
+ case StringKind:
+ return "StringKind"
+ case BytesKind:
+ return "BytesKind"
+ case MessageKind:
+ return "MessageKind"
+ case GroupKind:
+ return "GroupKind"
+ default:
+ return fmt.Sprintf("Kind(%d)", k)
+ }
+}
+
+// FieldNumber is the field number in a message.
+type FieldNumber = protowire.Number
+
+// FieldNumbers represent a list of field numbers.
+type FieldNumbers interface {
+ // Len reports the number of fields in the list.
+ Len() int
+ // Get returns the ith field number. It panics if out of bounds.
+ Get(i int) FieldNumber
+ // Has reports whether n is within the list of fields.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// FieldRanges represent a list of field number ranges.
+type FieldRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]FieldNumber // start inclusive; end exclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// EnumNumber is the numeric value for an enum.
+type EnumNumber int32
+
+// EnumRanges represent a list of enum number ranges.
+type EnumRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]EnumNumber // start inclusive; end inclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n EnumNumber) bool
+
+ doNotImplement
+}
+
+var (
+ regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
+ regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`)
+)
+
+// Name is the short name for a proto declaration. This is not the name
+// as used in Go source code, which might not be identical to the proto name.
+type Name string // e.g., "Kind"
+
+// IsValid reports whether n is a syntactically valid name.
+// An empty name is invalid.
+func (n Name) IsValid() bool {
+ return regexName.MatchString(string(n))
+}
+
+// Names represent a list of names.
+type Names interface {
+ // Len reports the number of names in the list.
+ Len() int
+ // Get returns the ith name. It panics if out of bounds.
+ Get(i int) Name
+ // Has reports whether s matches any names in the list.
+ Has(s Name) bool
+
+ doNotImplement
+}
+
+// FullName is a qualified name that uniquely identifies a proto declaration.
+// A qualified name is the concatenation of the proto package along with the
+// fully-declared name (i.e., name of parent preceding the name of the child),
+// with a '.' delimiter placed between each Name.
+//
+// This should not have any leading or trailing dots.
+type FullName string // e.g., "google.protobuf.Field.Kind"
+
+// IsValid reports whether n is a syntactically valid full name.
+// An empty full name is invalid.
+func (n FullName) IsValid() bool {
+ return regexFullName.MatchString(string(n))
+}
+
+// Name returns the short name, which is the last identifier segment.
+// A single segment FullName is the Name itself.
+func (n FullName) Name() Name {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return Name(n[i+1:])
+ }
+ return Name(n)
+}
+
+// Parent returns the full name with the trailing identifier removed.
+// A single segment FullName has no parent.
+func (n FullName) Parent() FullName {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return n[:i]
+ }
+ return ""
+}
+
+// Append returns the qualified name appended with the provided short name.
+//
+// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid
+func (n FullName) Append(s Name) FullName {
+ if n == "" {
+ return FullName(s)
+ }
+ return n + "." + FullName(s)
+}
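
A minimal sketch of FullName navigation using the accessors above:

    n := protoreflect.FullName("google.protobuf.Field.Kind")
    fmt.Println(n.Name())                         // Kind
    fmt.Println(n.Parent())                       // google.protobuf.Field
    fmt.Println(n.Parent().Append(n.Name()) == n) // true
    fmt.Println(n.IsValid())                      // true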
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
new file mode 100644
index 000000000..32ea3d98c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+// SourceLocations is a list of source locations.
+type SourceLocations interface {
+ // Len reports the number of source locations in the proto file.
+ Len() int
+ // Get returns the ith SourceLocation. It panics if out of bounds.
+ Get(int) SourceLocation
+
+ doNotImplement
+
+ // TODO: Add ByPath and ByDescriptor helper methods.
+}
+
+// SourceLocation describes a source location and
+// corresponds with the google.protobuf.SourceCodeInfo.Location message.
+type SourceLocation struct {
+ // Path is the path to the declaration from the root file descriptor.
+ // The contents of this slice must not be mutated.
+ Path SourcePath
+
+ // StartLine and StartColumn are the zero-indexed starting location
+ // in the source file for the declaration.
+ StartLine, StartColumn int
+ // EndLine and EndColumn are the zero-indexed ending location
+ // in the source file for the declaration.
+ // In the descriptor.proto, the end line may be omitted if it is identical
+ // to the start line. Here, it is always populated.
+ EndLine, EndColumn int
+
+ // LeadingDetachedComments are the leading detached comments
+ // for the declaration. The contents of this slice must not be mutated.
+ LeadingDetachedComments []string
+ // LeadingComments is the leading attached comment for the declaration.
+ LeadingComments string
+ // TrailingComments is the trailing attached comment for the declaration.
+ TrailingComments string
+}
+
+// SourcePath identifies part of a file descriptor for a source location.
+// The SourcePath is a sequence of either field numbers or indexes into
+// a repeated field that form a path starting from the root file descriptor.
+//
+// See google.protobuf.SourceCodeInfo.Location.path.
+type SourcePath []int32
+
+// TODO: Add SourcePath.String method to pretty-print the path. For example:
+// ".message_type[6].nested_type[15].field[3]"
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
new file mode 100644
index 000000000..5be14a725
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -0,0 +1,631 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+// Descriptor provides a set of accessors that are common to every descriptor.
+// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto,
+// but provides efficient lookup and immutability.
+//
+// Each descriptor is comparable. Equality implies that the two types are
+// exactly identical. However, it is possible for the same semantically
+// identical proto type to be represented by multiple type descriptors.
+//
+// For example, suppose we have t1 and t2 which are both MessageDescriptors.
+// If t1 == t2, then the types are definitely equal and all accessors return
+// the same information. However, if t1 != t2, then it is still possible that
+// they represent the same proto type (e.g., t1.FullName == t2.FullName).
+// This can occur if a descriptor type is created dynamically, or multiple
+// versions of the same proto type are accidentally linked into the Go binary.
+type Descriptor interface {
+ // ParentFile returns the parent file descriptor that this descriptor
+ // is declared within. The parent file for the file descriptor is itself.
+ //
+ // Support for this functionality is optional and may return nil.
+ ParentFile() FileDescriptor
+
+ // Parent returns the parent containing this descriptor declaration.
+ // The following shows the mapping from child type to possible parent types:
+ //
+ // ╔═════════════════════╤═══════════════════════════════════╗
+ // ║ Child type │ Possible parent types ║
+ // ╠═════════════════════╪═══════════════════════════════════╣
+ // ║ FileDescriptor │ nil ║
+ // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ OneofDescriptor │ MessageDescriptor ║
+ // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ EnumValueDescriptor │ EnumDescriptor ║
+ // ║ ServiceDescriptor │ FileDescriptor ║
+ // ║ MethodDescriptor │ ServiceDescriptor ║
+ // ╚═════════════════════╧═══════════════════════════════════╝
+ //
+ // Support for this functionality is optional and may return nil.
+ Parent() Descriptor
+
+ // Index returns the index of this descriptor within its parent.
+ // It returns 0 if the descriptor does not have a parent or if the parent
+ // is unknown.
+ Index() int
+
+ // Syntax is the protobuf syntax.
+ Syntax() Syntax // e.g., Proto2 or Proto3
+
+ // Name is the short name of the declaration (i.e., FullName.Name).
+ Name() Name // e.g., "Any"
+
+ // FullName is the fully-qualified name of the declaration.
+ //
+ // The FullName is a concatenation of the full name of the type that this
+ // type is declared within and the declaration name. For example,
+ // field "foo_field" in message "proto.package.MyMessage" is
+ // uniquely identified as "proto.package.MyMessage.foo_field".
+ // Enum values are an exception to the rule (see EnumValueDescriptor).
+ FullName() FullName // e.g., "google.protobuf.Any"
+
+ // IsPlaceholder reports whether type information is missing since a
+ // dependency is not resolved, in which case only name information is known.
+ //
+ // Placeholder types may only be returned by the following accessors
+ // as a result of unresolved dependencies or weak imports:
+ //
+ // ╔═══════════════════════════════════╤═════════════════════╗
+ // ║ Accessor │ Descriptor ║
+ // ╠═══════════════════════════════════╪═════════════════════╣
+ // ║ FileImports.FileDescriptor │ FileDescriptor ║
+ // ║ FieldDescriptor.Enum │ EnumDescriptor ║
+ // ║ FieldDescriptor.Message │ MessageDescriptor ║
+ // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║
+ // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║
+ // ║ MethodDescriptor.Input │ MessageDescriptor ║
+ // ║ MethodDescriptor.Output │ MessageDescriptor ║
+ // ╚═══════════════════════════════════╧═════════════════════╝
+ //
+ // If true, only Name and FullName are valid.
+ // For FileDescriptor, the Path is also valid.
+ IsPlaceholder() bool
+
+ // Options returns the descriptor options. The caller must not modify
+ // the returned value.
+ //
+ // To avoid a dependency cycle, this function returns a proto.Message value.
+ // The proto message type returned for each descriptor type is as follows:
+ // ╔═════════════════════╤══════════════════════════════════════════╗
+ // ║ Go type │ Protobuf message type ║
+ // ╠═════════════════════╪══════════════════════════════════════════╣
+ // ║ FileDescriptor │ google.protobuf.FileOptions ║
+ // ║ EnumDescriptor │ google.protobuf.EnumOptions ║
+ // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║
+ // ║ MessageDescriptor │ google.protobuf.MessageOptions ║
+ // ║ FieldDescriptor │ google.protobuf.FieldOptions ║
+ // ║ OneofDescriptor │ google.protobuf.OneofOptions ║
+ // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║
+ // ║ MethodDescriptor │ google.protobuf.MethodOptions ║
+ // ╚═════════════════════╧══════════════════════════════════════════╝
+ //
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ Options() ProtoMessage
+
+ doNotImplement
+}
+
+// FileDescriptor describes the types in a complete proto file and
+// corresponds with the google.protobuf.FileDescriptorProto message.
+//
+// Top-level declarations:
+// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
+type FileDescriptor interface {
+ Descriptor // Descriptor.FullName is identical to Package
+
+ // Path returns the file name, relative to the source tree root.
+ Path() string // e.g., "path/to/file.proto"
+ // Package returns the protobuf package namespace.
+ Package() FullName // e.g., "google.protobuf"
+
+ // Imports is a list of imported proto files.
+ Imports() FileImports
+
+ // Enums is a list of the top-level enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of the top-level message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of the top-level extension declarations.
+ Extensions() ExtensionDescriptors
+ // Services is a list of the top-level service declarations.
+ Services() ServiceDescriptors
+
+ // SourceLocations is a list of source locations.
+ SourceLocations() SourceLocations
+
+ isFileDescriptor
+}
+type isFileDescriptor interface{ ProtoType(FileDescriptor) }
+
+// FileImports is a list of file imports.
+type FileImports interface {
+ // Len reports the number of files imported by this proto file.
+ Len() int
+ // Get returns the ith FileImport. It panics if out of bounds.
+ Get(i int) FileImport
+
+ doNotImplement
+}
+
+// FileImport is the declaration for a proto file import.
+type FileImport struct {
+ // FileDescriptor is the file type for the given import.
+ // It is a placeholder descriptor if IsWeak is set or if a dependency has
+ // not been regenerated to implement the new reflection APIs.
+ FileDescriptor
+
+ // IsPublic reports whether this is a public import, which causes this file
+ // to alias declarations within the imported file. The intended use case
+ // for this feature is the ability to move proto files without breaking
+ // existing dependencies.
+ //
+ // The current file and the imported file must be within the same proto package.
+ IsPublic bool
+
+ // IsWeak reports whether this is a weak import, which does not impose
+ // a direct dependency on the target file.
+ //
+ // Weak imports are a legacy proto1 feature. Equivalent behavior is
+ // achieved using proto2 extension fields or proto3 Any messages.
+ IsWeak bool
+}
+
+// MessageDescriptor describes a message and
+// corresponds with the google.protobuf.DescriptorProto message.
+//
+// Nested declarations:
+// FieldDescriptor, OneofDescriptor, ExtensionDescriptor, EnumDescriptor,
+// and/or MessageDescriptor.
+type MessageDescriptor interface {
+ Descriptor
+
+ // IsMapEntry indicates that this is an auto-generated message type to
+ // represent the entry type for a map field.
+ //
+ // Map entry messages have only two fields:
+ // • a "key" field with a field number of 1
+ // • a "value" field with a field number of 2
+ // The key and value types are determined by these two fields.
+ //
+ // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true
+ // for some field with this message type.
+ IsMapEntry() bool
+
+ // Fields is a list of nested field declarations.
+ Fields() FieldDescriptors
+ // Oneofs is a list of nested oneof declarations.
+ Oneofs() OneofDescriptors
+
+ // ReservedNames is a list of reserved field names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of field numbers.
+ ReservedRanges() FieldRanges
+ // RequiredNumbers is a list of required field numbers.
+ // In Proto3, it is always an empty list.
+ RequiredNumbers() FieldNumbers
+ // ExtensionRanges is the list of field number ranges used for extension fields.
+ // In Proto3, it is always an empty list.
+ ExtensionRanges() FieldRanges
+ // ExtensionRangeOptions returns the ith extension range options.
+ //
+ // To avoid a dependency cycle, this method returns a proto.Message value,
+ // which always contains a google.protobuf.ExtensionRangeOptions message.
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ ExtensionRangeOptions(i int) ProtoMessage
+
+ // Enums is a list of nested enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of nested message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of nested extension declarations.
+ Extensions() ExtensionDescriptors
+
+ isMessageDescriptor
+}
+type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
+
+// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+type MessageType interface {
+ // New returns a newly allocated empty message.
+ New() Message
+
+ // Zero returns an empty, read-only message.
+ Zero() Message
+
+ // Descriptor returns the message descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New().Descriptor()
+ Descriptor() MessageDescriptor
+}
+
+// MessageDescriptors is a list of message declarations.
+type MessageDescriptors interface {
+ // Len reports the number of messages.
+ Len() int
+ // Get returns the ith MessageDescriptor. It panics if out of bounds.
+ Get(i int) MessageDescriptor
+ // ByName returns the MessageDescriptor for a message named s.
+ // It returns nil if not found.
+ ByName(s Name) MessageDescriptor
+
+ doNotImplement
+}
+
+// FieldDescriptor describes a field within a message and
+// corresponds with the google.protobuf.FieldDescriptorProto message.
+//
+// It is used for both normal fields defined within the parent message
+// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
+// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+type FieldDescriptor interface {
+ Descriptor
+
+ // Number reports the unique number for this field.
+ Number() FieldNumber
+ // Cardinality reports the cardinality for this field.
+ Cardinality() Cardinality
+ // Kind reports the basic kind for this field.
+ Kind() Kind
+
+ // HasJSONName reports whether this field has an explicitly set JSON name.
+ HasJSONName() bool
+
+ // JSONName reports the name used for JSON serialization.
+ // It is usually the camel-cased form of the field name.
+ JSONName() string
+
+ // HasPresence reports whether the field distinguishes between unpopulated
+ // and default values.
+ HasPresence() bool
+
+ // IsExtension reports whether this is an extension field. If false,
+ // then Parent and ContainingMessage refer to the same message.
+ // Otherwise, ContainingMessage and Parent likely differ.
+ IsExtension() bool
+
+ // HasOptionalKeyword reports whether the "optional" keyword was explicitly
+ // specified in the source .proto file.
+ HasOptionalKeyword() bool
+
+ // IsWeak reports whether this is a weak field, which does not impose a
+ // direct dependency on the target type.
+ // If true, then Message returns a placeholder type.
+ IsWeak() bool
+
+ // IsPacked reports whether repeated primitive numeric kinds should be
+ // serialized using a packed encoding.
+ // If true, then it implies Cardinality is Repeated.
+ IsPacked() bool
+
+ // IsList reports whether this field represents a list,
+ // where the value type for the associated field is a List.
+ // It is equivalent to checking whether Cardinality is Repeated and
+ // that IsMap reports false.
+ IsList() bool
+
+ // IsMap reports whether this field represents a map,
+ // where the value type for the associated field is a Map.
+ // It is equivalent to checking whether Cardinality is Repeated,
+ // that the Kind is MessageKind, and that Message.IsMapEntry reports true.
+ IsMap() bool
+
+ // MapKey returns the field descriptor for the key in the map entry.
+ // It returns nil if IsMap reports false.
+ MapKey() FieldDescriptor
+
+ // MapValue returns the field descriptor for the value in the map entry.
+ // It returns nil if IsMap reports false.
+ MapValue() FieldDescriptor
+
+ // HasDefault reports whether this field has a default value.
+ HasDefault() bool
+
+ // Default returns the default value for scalar fields.
+ // For proto2, it is the default value as specified in the proto file,
+ // or the zero value if unspecified.
+ // For proto3, it is always the zero value of the scalar.
+ // The Value type is determined by the Kind.
+ Default() Value
+
+ // DefaultEnumValue returns the enum value descriptor for the default value
+ // of an enum field, and is nil for any other kind of field.
+ DefaultEnumValue() EnumValueDescriptor
+
+ // ContainingOneof is the containing oneof that this field belongs to,
+ // and is nil if this field is not part of a oneof.
+ ContainingOneof() OneofDescriptor
+
+ // ContainingMessage is the containing message that this field belongs to.
+ // For extension fields, this may not necessarily be the parent message
+ // that the field is declared within.
+ ContainingMessage() MessageDescriptor
+
+ // Enum is the enum descriptor if Kind is EnumKind.
+ // It returns nil for any other Kind.
+ Enum() EnumDescriptor
+
+ // Message is the message descriptor if Kind is
+ // MessageKind or GroupKind. It returns nil for any other Kind.
+ Message() MessageDescriptor
+
+ isFieldDescriptor
+}
+type isFieldDescriptor interface{ ProtoType(FieldDescriptor) }
+
+// FieldDescriptors is a list of field declarations.
+type FieldDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith FieldDescriptor. It panics if out of bounds.
+ Get(i int) FieldDescriptor
+ // ByName returns the FieldDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) FieldDescriptor
+ // ByJSONName returns the FieldDescriptor for a field with s as the JSON name.
+ // It returns nil if not found.
+ ByJSONName(s string) FieldDescriptor
+ // ByNumber returns the FieldDescriptor for a field numbered n.
+ // It returns nil if not found.
+ ByNumber(n FieldNumber) FieldDescriptor
+
+ doNotImplement
+}
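
A minimal sketch of walking a message's field descriptors through the interfaces above; examplepb.Profile is a hypothetical generated message:

    fds := (&examplepb.Profile{}).ProtoReflect().Descriptor().Fields()
    for i := 0; i < fds.Len(); i++ {
        fd := fds.Get(i)
        fmt.Printf("field %d: %s (%s %s)\n",
            fd.Number(), fd.FullName(), fd.Cardinality(), fd.Kind())
    }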
+
+// OneofDescriptor describes a oneof field set within a given message and
+// corresponds with the google.protobuf.OneofDescriptorProto message.
+type OneofDescriptor interface {
+ Descriptor
+
+ // IsSynthetic reports whether this is a synthetic oneof created to support
+ // proto3 optional semantics. If true, Fields contains exactly one field
+ // with HasOptionalKeyword specified.
+ IsSynthetic() bool
+
+ // Fields is a list of fields belonging to this oneof.
+ Fields() FieldDescriptors
+
+ isOneofDescriptor
+}
+type isOneofDescriptor interface{ ProtoType(OneofDescriptor) }
+
+// OneofDescriptors is a list of oneof declarations.
+type OneofDescriptors interface {
+ // Len reports the number of oneof fields.
+ Len() int
+ // Get returns the ith OneofDescriptor. It panics if out of bounds.
+ Get(i int) OneofDescriptor
+ // ByName returns the OneofDescriptor for a oneof named s.
+ // It returns nil if not found.
+ ByName(s Name) OneofDescriptor
+
+ doNotImplement
+}
+
+// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
+type ExtensionDescriptor = FieldDescriptor
+
+// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
+type ExtensionTypeDescriptor interface {
+ ExtensionDescriptor
+
+ // Type returns the associated ExtensionType.
+ Type() ExtensionType
+
+ // Descriptor returns the plain ExtensionDescriptor without the
+ // associated ExtensionType.
+ Descriptor() ExtensionDescriptor
+}
+
+// ExtensionDescriptors is a list of extension declarations.
+type ExtensionDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith ExtensionDescriptor. It panics if out of bounds.
+ Get(i int) ExtensionDescriptor
+ // ByName returns the ExtensionDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) ExtensionDescriptor
+
+ doNotImplement
+}
+
+// ExtensionType encapsulates an ExtensionDescriptor with a concrete
+// Go implementation. The nested field descriptor must be for an extension field.
+//
+// While a normal field is a member of the parent message that it is declared
+// within (see Descriptor.Parent), an extension field is a member of some other
+// target message (see ExtensionDescriptor.ContainingMessage) and may have no
+// relationship with the parent. However, the full name of an extension field is
+// relative to the parent that it is declared within.
+//
+// For example:
+// syntax = "proto2";
+// package example;
+// message FooMessage {
+// extensions 100 to max;
+// }
+// message BarMessage {
+// extend FooMessage { optional BarMessage bar_field = 100; }
+// }
+//
+// Field "bar_field" is an extension of FooMessage, but its full name is
+// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field".
+type ExtensionType interface {
+ // New returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ New() Value
+
+ // Zero returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ // For composite types, this returns an empty, read-only message, list, or map.
+ Zero() Value
+
+ // TypeDescriptor returns the extension type descriptor.
+ TypeDescriptor() ExtensionTypeDescriptor
+
+ // ValueOf wraps the input and returns it as a Value.
+ // ValueOf panics if the input value is invalid or not the appropriate type.
+ //
+ // ValueOf is more extensive than protoreflect.ValueOf for a given field's
+ // value as it has more type information available.
+ ValueOf(interface{}) Value
+
+ // InterfaceOf completely unwraps the Value to the underlying Go type.
+ // InterfaceOf panics if the input is nil or does not represent the
+ // appropriate underlying Go type. For composite types, it panics if the
+ // value is not mutable.
+ //
+ // InterfaceOf is able to unwrap the Value further than Value.Interface
+ // as it has more type information available.
+ InterfaceOf(Value) interface{}
+
+ // IsValidValue reports whether the Value is valid to assign to the field.
+ IsValidValue(Value) bool
+
+ // IsValidInterface reports whether the input is valid to assign to the field.
+ IsValidInterface(interface{}) bool
+}
+
+// EnumDescriptor describes an enum and
+// corresponds with the google.protobuf.EnumDescriptorProto message.
+//
+// Nested declarations:
+// EnumValueDescriptor.
+type EnumDescriptor interface {
+ Descriptor
+
+ // Values is a list of nested enum value declarations.
+ Values() EnumValueDescriptors
+
+ // ReservedNames is a list of reserved enum names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of enum numbers.
+ ReservedRanges() EnumRanges
+
+ isEnumDescriptor
+}
+type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
+
+// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
+type EnumType interface {
+ // New returns an instance of this enum type with its value set to n.
+ New(n EnumNumber) Enum
+
+ // Descriptor returns the enum descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New(0).Descriptor()
+ Descriptor() EnumDescriptor
+}
+
+// EnumDescriptors is a list of enum declarations.
+type EnumDescriptors interface {
+ // Len reports the number of enum types.
+ Len() int
+ // Get returns the ith EnumDescriptor. It panics if out of bounds.
+ Get(i int) EnumDescriptor
+ // ByName returns the EnumDescriptor for an enum named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumDescriptor
+
+ doNotImplement
+}
+
+// EnumValueDescriptor describes an enum value and
+// corresponds with the google.protobuf.EnumValueDescriptorProto message.
+//
+// All other proto declarations are in the namespace of the parent.
+// However, enum values do not follow this rule and are within the namespace
+// of the parent's parent (i.e., they are a sibling of the containing enum).
+// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified
+// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE".
+type EnumValueDescriptor interface {
+ Descriptor
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+
+ isEnumValueDescriptor
+}
+type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) }
+
+// EnumValueDescriptors is a list of enum value declarations.
+type EnumValueDescriptors interface {
+ // Len reports the number of enum values.
+ Len() int
+ // Get returns the ith EnumValueDescriptor. It panics if out of bounds.
+ Get(i int) EnumValueDescriptor
+ // ByName returns the EnumValueDescriptor for the enum value named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumValueDescriptor
+ // ByNumber returns the EnumValueDescriptor for the enum value numbered n.
+ // If multiple have the same number, the first one defined is returned.
+ // It returns nil if not found.
+ ByNumber(n EnumNumber) EnumValueDescriptor
+
+ doNotImplement
+}
+
+// ServiceDescriptor describes a service and
+// corresponds with the google.protobuf.ServiceDescriptorProto message.
+//
+// Nested declarations: MethodDescriptor.
+type ServiceDescriptor interface {
+ Descriptor
+
+ // Methods is a list of nested message declarations.
+ Methods() MethodDescriptors
+
+ isServiceDescriptor
+}
+type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) }
+
+// ServiceDescriptors is a list of service declarations.
+type ServiceDescriptors interface {
+ // Len reports the number of services.
+ Len() int
+ // Get returns the ith ServiceDescriptor. It panics if out of bounds.
+ Get(i int) ServiceDescriptor
+ // ByName returns the ServiceDescriptor for a service named s.
+ // It returns nil if not found.
+ ByName(s Name) ServiceDescriptor
+
+ doNotImplement
+}
+
+// MethodDescriptor describes a method and
+// corresponds with the google.protobuf.MethodDescriptorProto message.
+type MethodDescriptor interface {
+ Descriptor
+
+ // Input is the input message descriptor.
+ Input() MessageDescriptor
+ // Output is the output message descriptor.
+ Output() MessageDescriptor
+ // IsStreamingClient reports whether the client streams multiple messages.
+ IsStreamingClient() bool
+ // IsStreamingServer reports whether the server streams multiple messages.
+ IsStreamingServer() bool
+
+ isMethodDescriptor
+}
+type isMethodDescriptor interface{ ProtoType(MethodDescriptor) }
+
+// MethodDescriptors is a list of method declarations.
+type MethodDescriptors interface {
+ // Len reports the number of methods.
+ Len() int
+ // Get returns the ith MethodDescriptor. It panics if out of bounds.
+ Get(i int) MethodDescriptor
+ // ByName returns the MethodDescriptor for a service method named s.
+ // It returns nil if not found.
+ ByName(s Name) MethodDescriptor
+
+ doNotImplement
+}
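
For orientation, the descriptor list types above all share the same Len/Get/ByName access pattern. The following is a minimal illustrative sketch (not part of the vendored change) of walking nested declarations, assuming only the vendored protoreflect package; the package and function names are placeholders.

// Sketch: walking descriptor lists via the shared Len/Get accessors.
// Illustrative only; not code introduced by this diff.
package descriptorwalk

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// listEnumValues prints every value declared in an enum.
func listEnumValues(ed protoreflect.EnumDescriptor) {
	vds := ed.Values()
	for i := 0; i < vds.Len(); i++ {
		vd := vds.Get(i)
		fmt.Printf("%s = %d\n", vd.Name(), vd.Number())
	}
}

// listMethods prints the streaming shape of every method in a service.
func listMethods(sd protoreflect.ServiceDescriptor) {
	mds := sd.Methods()
	for i := 0; i < mds.Len(); i++ {
		md := mds.Get(i)
		fmt.Printf("%s (client streaming: %t, server streaming: %t)\n",
			md.Name(), md.IsStreamingClient(), md.IsStreamingServer())
	}
}
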
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
new file mode 100644
index 000000000..f31981077
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -0,0 +1,285 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import "google.golang.org/protobuf/encoding/protowire"
+
+// Enum is a reflection interface for a concrete enum value,
+// which provides type information and a getter for the enum number.
+// Enum does not provide a mutable API since enums are commonly backed by
+// Go constants, which are not addressable.
+type Enum interface {
+ // Descriptor returns the enum descriptor, which contains only the protobuf
+ // type information for the enum.
+ Descriptor() EnumDescriptor
+
+ // Type returns the enum type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the enum descriptor be used instead.
+ Type() EnumType
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+}
+
+// Message is a reflective interface for a concrete message value,
+// encapsulating both type and value information for the message.
+//
+// Accessor/mutators for individual fields are keyed by FieldDescriptor.
+// For non-extension fields, the descriptor must exactly match the
+// field known by the parent message.
+// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
+// extend the parent message (i.e., have the same message FullName), and
+// be within the parent's extension range.
+//
+// Each field Value can be a scalar or a composite type (Message, List, or Map).
+// See Value for the Go types associated with a FieldDescriptor.
+// Providing a Value that is invalid or of an incorrect type panics.
+type Message interface {
+ // Descriptor returns the message descriptor, which contains only the protobuf
+ // type information for the message.
+ Descriptor() MessageDescriptor
+
+ // Type returns the message type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the message descriptor be used instead.
+ Type() MessageType
+
+ // New returns a newly allocated and mutable empty message.
+ New() Message
+
+ // Interface unwraps the message reflection interface and
+ // returns the underlying ProtoMessage interface.
+ Interface() ProtoMessage
+
+ // Range iterates over every populated field in an undefined order,
+ // calling f for each field descriptor and value encountered.
+ // Range returns immediately if f returns false.
+ // While iterating, mutating operations may only be performed
+ // on the current field descriptor.
+ Range(f func(FieldDescriptor, Value) bool)
+
+ // Has reports whether a field is populated.
+ //
+ // Some fields have the property of nullability where it is possible to
+ // distinguish between the default value of a field and whether the field
+ // was explicitly populated with the default value. Singular message fields,
+ // member fields of a oneof, and proto2 scalar fields are nullable. Such
+ // fields are populated only if explicitly set.
+ //
+ // In other cases (aside from the nullable cases above),
+ // a proto3 scalar field is populated if it contains a non-zero value, and
+ // a repeated field is populated if it is non-empty.
+ Has(FieldDescriptor) bool
+
+ // Clear clears the field such that a subsequent Has call reports false.
+ //
+ // Clearing an extension field clears both the extension type and value
+ // associated with the given field number.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(FieldDescriptor)
+
+ // Get retrieves the value for a field.
+ //
+ // For unpopulated scalars, it returns the default value, where
+ // the default value of a bytes scalar is guaranteed to be a copy.
+ // For unpopulated composite types, it returns an empty, read-only view
+ // of the value; to obtain a mutable reference, use Mutable.
+ Get(FieldDescriptor) Value
+
+ // Set stores the value for a field.
+ //
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType.
+ // When setting a composite type, it is unspecified whether the stored value
+ // aliases the source's memory in any way. If the composite value is an
+ // empty, read-only value, then it panics.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(FieldDescriptor, Value)
+
+ // Mutable returns a mutable reference to a composite type.
+ //
+ // If the field is unpopulated, it may allocate a composite value.
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType
+ // if not already stored.
+ // It panics if the field does not contain a composite type.
+ //
+ // Mutable is a mutating operation and unsafe for concurrent use.
+ Mutable(FieldDescriptor) Value
+
+ // NewField returns a new value that is assignable to the field
+ // for the given descriptor. For scalars, this returns the default value.
+ // For lists, maps, and messages, this returns a new, empty, mutable value.
+ NewField(FieldDescriptor) Value
+
+ // WhichOneof reports which field within the oneof is populated,
+ // returning nil if none are populated.
+ // It panics if the oneof descriptor does not belong to this message.
+ WhichOneof(OneofDescriptor) FieldDescriptor
+
+ // GetUnknown retrieves the entire list of unknown fields.
+ // The caller may only mutate the contents of the RawFields
+ // if the mutated bytes are stored back into the message with SetUnknown.
+ GetUnknown() RawFields
+
+ // SetUnknown stores an entire list of unknown fields.
+ // The raw fields must be syntactically valid according to the wire format.
+ // An implementation may panic if this is not the case.
+ // Once stored, the caller must not mutate the content of the RawFields.
+ // An empty RawFields may be passed to clear the fields.
+ //
+ // SetUnknown is a mutating operation and unsafe for concurrent use.
+ SetUnknown(RawFields)
+
+ // IsValid reports whether the message is valid.
+ //
+ // An invalid message is an empty, read-only value.
+ //
+ // An invalid message often corresponds to a nil pointer of the concrete
+ // message type, but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+
+ // ProtoMethods returns optional fast-path implementations of various operations.
+ // This method may return nil.
+ //
+ // The returned methods type is identical to
+ // "google.golang.org/protobuf/runtime/protoiface".Methods.
+ // Consult the protoiface package documentation for details.
+ ProtoMethods() *methods
+}
+
+// RawFields is the raw bytes for an ordered sequence of fields.
+// Each field contains both the tag (representing field number and wire type),
+// and also the wire data itself.
+type RawFields []byte
+
+// IsValid reports whether b is syntactically correct wire format.
+func (b RawFields) IsValid() bool {
+ for len(b) > 0 {
+ _, _, n := protowire.ConsumeField(b)
+ if n < 0 {
+ return false
+ }
+ b = b[n:]
+ }
+ return true
+}
+
+// List is a zero-indexed, ordered list.
+// The element Value type is determined by FieldDescriptor.Kind.
+// Providing a Value that is invalid or of an incorrect type panics.
+type List interface {
+ // Len reports the number of entries in the List.
+ // Get, Set, and Truncate panic with out of bound indexes.
+ Len() int
+
+ // Get retrieves the value at the given index.
+ // It never returns an invalid value.
+ Get(int) Value
+
+ // Set stores a value for the given index.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(int, Value)
+
+ // Append appends the provided value to the end of the list.
+ // When appending a composite type, it is unspecified whether the appended
+ // value aliases the source's memory in any way.
+ //
+ // Append is a mutating operation and unsafe for concurrent use.
+ Append(Value)
+
+ // AppendMutable appends a new, empty, mutable message value to the end
+ // of the list and returns it.
+ // It panics if the list does not contain a message type.
+ AppendMutable() Value
+
+ // Truncate truncates the list to a smaller length.
+ //
+ // Truncate is a mutating operation and unsafe for concurrent use.
+ Truncate(int)
+
+ // NewElement returns a new value for a list element.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewElement() Value
+
+ // IsValid reports whether the list is valid.
+ //
+ // An invalid list is an empty, read-only value.
+ //
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
+
+// Map is an unordered, associative map.
+// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
+// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
+// Providing a MapKey or Value that is invalid or of an incorrect type panics.
+type Map interface {
+ // Len reports the number of elements in the map.
+ Len() int
+
+ // Range iterates over every map entry in an undefined order,
+ // calling f for each key and value encountered.
+ // Range calls f Len times unless f returns false, which stops iteration.
+ // While iterating, mutating operations may only be performed
+ // on the current map key.
+ Range(f func(MapKey, Value) bool)
+
+ // Has reports whether an entry with the given key is in the map.
+ Has(MapKey) bool
+
+ // Clear clears the entry associated with the given key.
+ // The operation does nothing if there is no entry associated with the key.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(MapKey)
+
+ // Get retrieves the value for an entry with the given key.
+ // It returns an invalid value for non-existent entries.
+ Get(MapKey) Value
+
+ // Set stores the value for an entry with the given key.
+ // It panics when given a key or value that is invalid or the wrong type.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(MapKey, Value)
+
+ // Mutable retrieves a mutable reference to the entry for the given key.
+ // If no entry exists for the key, it creates a new, empty, mutable value
+ // and stores it as the entry for the key.
+ // It panics if the map value is not a message.
+ Mutable(MapKey) Value
+
+ // NewValue returns a new value assignable as a map value.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewValue() Value
+
+ // IsValid reports whether the map is valid.
+ //
+ // An invalid map is an empty, read-only value.
+ //
+ // An invalid message often corresponds to a nil Go map value,
+ // but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
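
As a rough usage sketch (not part of the vendored change), the Message interface above is normally reached through a generated message's ProtoReflect method; a generic helper that lists populated fields might look like the following. The package and helper names are placeholders.

// Sketch: listing populated fields through the protoreflect.Message view.
// Illustrative only; not code introduced by this diff.
package messagedump

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// dumpPopulated walks every populated field of an arbitrary message,
// using Range as documented on the Message interface above.
func dumpPopulated(m proto.Message) {
	m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Printf("%s = %v\n", fd.Name(), v.Interface())
		return true // continue iterating over the remaining fields
	})
}
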
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
new file mode 100644
index 000000000..918e685e1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package protoreflect
+
+import "google.golang.org/protobuf/internal/pragma"
+
+type valueType int
+
+const (
+ nilType valueType = iota
+ boolType
+ int32Type
+ int64Type
+ uint32Type
+ uint64Type
+ float32Type
+ float64Type
+ stringType
+ bytesType
+ enumType
+ ifaceType
+)
+
+// value is a union where only one type can be represented at a time.
+// This uses a distinct field for each type. This is type safe in Go, but
+// occupies more memory than necessary (72B).
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ typ valueType // 8B
+ num uint64 // 8B
+ str string // 16B
+ bin []byte // 24B
+ iface interface{} // 16B
+}
+
+func valueOfString(v string) Value {
+ return Value{typ: stringType, str: v}
+}
+func valueOfBytes(v []byte) Value {
+ return Value{typ: bytesType, bin: v}
+}
+func valueOfIface(v interface{}) Value {
+ return Value{typ: ifaceType, iface: v}
+}
+
+func (v Value) getString() string {
+ return v.str
+}
+func (v Value) getBytes() []byte {
+ return v.bin
+}
+func (v Value) getIface() interface{} {
+ return v.iface
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
new file mode 100644
index 000000000..f334f71bc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -0,0 +1,409 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "fmt"
+ "math"
+)
+
+// Value is a union where only one Go type may be set at a time.
+// The Value is used to represent all possible values a field may take.
+// The following shows which Go type is used to represent each proto Kind:
+//
+// ╔════════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠════════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ float32 │ FloatKind ║
+// ║ float64 │ DoubleKind ║
+// ║ string │ StringKind ║
+// ║ []byte │ BytesKind ║
+// ║ EnumNumber │ EnumKind ║
+// ║ Message │ MessageKind, GroupKind ║
+// ╚════════════╧═════════════════════════════════════╝
+//
+// Multiple protobuf Kinds may be represented by a single Go type if the type
+// can losslessly represent the information for the proto kind. For example,
+// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
+// but use different integer encoding methods.
+//
+// The List or Map types are used if the field cardinality is repeated.
+// A field is a List if FieldDescriptor.IsList reports true.
+// A field is a Map if FieldDescriptor.IsMap reports true.
+//
+// Converting to/from a Value and a concrete Go value panics on type mismatch.
+// For example, ValueOf("hello").Int() panics because this attempts to
+// retrieve an int64 from a string.
+type Value value
+
+// The protoreflect API uses a custom Value union type instead of interface{}
+// to keep the future open for performance optimizations. Using an interface{}
+// always incurs an allocation for primitives (e.g., int64) since it needs to
+// be boxed on the heap (as interfaces can only contain pointers natively).
+// Instead, we represent the Value union as a flat struct that internally keeps
+// track of which type is set. Using unsafe, the Value union can be reduced
+// down to 24B, which is identical in size to a slice.
+//
+// The latest compiler (Go1.11) currently suffers from some limitations:
+// • With inlining, the compiler should be able to statically prove that
+// only one of these switch cases is taken and inline one specific case.
+// See https://golang.org/issue/22310.
+
+// ValueOf returns a Value initialized with the concrete value stored in v.
+// This panics if the type does not match one of the allowed types in the
+// Value union.
+func ValueOf(v interface{}) Value {
+ switch v := v.(type) {
+ case nil:
+ return Value{}
+ case bool:
+ return ValueOfBool(v)
+ case int32:
+ return ValueOfInt32(v)
+ case int64:
+ return ValueOfInt64(v)
+ case uint32:
+ return ValueOfUint32(v)
+ case uint64:
+ return ValueOfUint64(v)
+ case float32:
+ return ValueOfFloat32(v)
+ case float64:
+ return ValueOfFloat64(v)
+ case string:
+ return ValueOfString(v)
+ case []byte:
+ return ValueOfBytes(v)
+ case EnumNumber:
+ return ValueOfEnum(v)
+ case Message, List, Map:
+ return valueOfIface(v)
+ default:
+ panic(fmt.Sprintf("invalid type: %T", v))
+ }
+}
+
+// ValueOfBool returns a new boolean value.
+func ValueOfBool(v bool) Value {
+ if v {
+ return Value{typ: boolType, num: 1}
+ } else {
+ return Value{typ: boolType, num: 0}
+ }
+}
+
+// ValueOfInt32 returns a new int32 value.
+func ValueOfInt32(v int32) Value {
+ return Value{typ: int32Type, num: uint64(v)}
+}
+
+// ValueOfInt64 returns a new int64 value.
+func ValueOfInt64(v int64) Value {
+ return Value{typ: int64Type, num: uint64(v)}
+}
+
+// ValueOfUint32 returns a new uint32 value.
+func ValueOfUint32(v uint32) Value {
+ return Value{typ: uint32Type, num: uint64(v)}
+}
+
+// ValueOfUint64 returns a new uint64 value.
+func ValueOfUint64(v uint64) Value {
+ return Value{typ: uint64Type, num: v}
+}
+
+// ValueOfFloat32 returns a new float32 value.
+func ValueOfFloat32(v float32) Value {
+ return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfFloat64 returns a new float64 value.
+func ValueOfFloat64(v float64) Value {
+ return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfString returns a new string value.
+func ValueOfString(v string) Value {
+ return valueOfString(v)
+}
+
+// ValueOfBytes returns a new bytes value.
+func ValueOfBytes(v []byte) Value {
+ return valueOfBytes(v[:len(v):len(v)])
+}
+
+// ValueOfEnum returns a new enum value.
+func ValueOfEnum(v EnumNumber) Value {
+ return Value{typ: enumType, num: uint64(v)}
+}
+
+// ValueOfMessage returns a new Message value.
+func ValueOfMessage(v Message) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfList returns a new List value.
+func ValueOfList(v List) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfMap returns a new Map value.
+func ValueOfMap(v Map) Value {
+ return valueOfIface(v)
+}
+
+// IsValid reports whether v is populated with a value.
+func (v Value) IsValid() bool {
+ return v.typ != nilType
+}
+
+// Interface returns v as an interface{}.
+//
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+ switch v.typ {
+ case nilType:
+ return nil
+ case boolType:
+ return v.Bool()
+ case int32Type:
+ return int32(v.Int())
+ case int64Type:
+ return int64(v.Int())
+ case uint32Type:
+ return uint32(v.Uint())
+ case uint64Type:
+ return uint64(v.Uint())
+ case float32Type:
+ return float32(v.Float())
+ case float64Type:
+ return float64(v.Float())
+ case stringType:
+ return v.String()
+ case bytesType:
+ return v.Bytes()
+ case enumType:
+ return v.Enum()
+ default:
+ return v.getIface()
+ }
+}
+
+func (v Value) typeName() string {
+ switch v.typ {
+ case nilType:
+ return "nil"
+ case boolType:
+ return "bool"
+ case int32Type:
+ return "int32"
+ case int64Type:
+ return "int64"
+ case uint32Type:
+ return "uint32"
+ case uint64Type:
+ return "uint64"
+ case float32Type:
+ return "float32"
+ case float64Type:
+ return "float64"
+ case stringType:
+ return "string"
+ case bytesType:
+ return "bytes"
+ case enumType:
+ return "enum"
+ default:
+ switch v := v.getIface().(type) {
+ case Message:
+ return "message"
+ case List:
+ return "list"
+ case Map:
+ return "map"
+ default:
+ return fmt.Sprintf("<unknown: %T>", v)
+ }
+ }
+}
+
+func (v Value) panicMessage(what string) string {
+ return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+ switch v.typ {
+ case boolType:
+ return v.num > 0
+ default:
+ panic(v.panicMessage("bool"))
+ }
+}
+
+// Int returns v as an int64 and panics if the type is not an int32 or int64.
+func (v Value) Int() int64 {
+ switch v.typ {
+ case int32Type, int64Type:
+ return int64(v.num)
+ default:
+ panic(v.panicMessage("int"))
+ }
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+ switch v.typ {
+ case uint32Type, uint64Type:
+ return uint64(v.num)
+ default:
+ panic(v.panicMessage("uint"))
+ }
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+ switch v.typ {
+ case float32Type, float64Type:
+ return math.Float64frombits(uint64(v.num))
+ default:
+ panic(v.panicMessage("float"))
+ }
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+ switch v.typ {
+ case stringType:
+ return v.getString()
+ default:
+ return fmt.Sprint(v.Interface())
+ }
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+ switch v.typ {
+ case bytesType:
+ return v.getBytes()
+ default:
+ panic(v.panicMessage("bytes"))
+ }
+}
+
+// Enum returns v as an EnumNumber and panics if the type is not an EnumNumber.
+func (v Value) Enum() EnumNumber {
+ switch v.typ {
+ case enumType:
+ return EnumNumber(v.num)
+ default:
+ panic(v.panicMessage("enum"))
+ }
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message {
+ switch vi := v.getIface().(type) {
+ case Message:
+ return vi
+ default:
+ panic(v.panicMessage("message"))
+ }
+}
+
+// List returns v as a List and panics if the type is not a List.
+func (v Value) List() List {
+ switch vi := v.getIface().(type) {
+ case List:
+ return vi
+ default:
+ panic(v.panicMessage("list"))
+ }
+}
+
+// Map returns v as a Map and panics if the type is not a Map.
+func (v Value) Map() Map {
+ switch vi := v.getIface().(type) {
+ case Map:
+ return vi
+ default:
+ panic(v.panicMessage("map"))
+ }
+}
+
+// MapKey returns v as a MapKey and panics for invalid MapKey types.
+func (v Value) MapKey() MapKey {
+ switch v.typ {
+ case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
+ return MapKey(v)
+ default:
+ panic(v.panicMessage("map key"))
+ }
+}
+
+// MapKey is used to index maps, where the Go type of the MapKey must match
+// the specified key Kind (see MessageDescriptor.IsMapEntry).
+// The following shows what Go type is used to represent each proto Kind:
+//
+// ╔═════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ string │ StringKind ║
+// ╚═════════╧═════════════════════════════════════╝
+//
+// A MapKey is constructed and accessed through a Value:
+// k := ValueOf("hash").MapKey() // convert string to MapKey
+// s := k.String() // convert MapKey to string
+//
+// The MapKey is a strict subset of valid types used in Value;
+// converting a Value to a MapKey with an invalid type panics.
+type MapKey value
+
+// IsValid reports whether k is populated with a value.
+func (k MapKey) IsValid() bool {
+ return Value(k).IsValid()
+}
+
+// Interface returns k as an interface{}.
+func (k MapKey) Interface() interface{} {
+ return Value(k).Interface()
+}
+
+// Bool returns k as a bool and panics if the type is not a bool.
+func (k MapKey) Bool() bool {
+ return Value(k).Bool()
+}
+
+// Int returns k as an int64 and panics if the type is not an int32 or int64.
+func (k MapKey) Int() int64 {
+ return Value(k).Int()
+}
+
+// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64.
+func (k MapKey) Uint() uint64 {
+ return Value(k).Uint()
+}
+
+// String returns k as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (k MapKey) String() string {
+ return Value(k).String()
+}
+
+// Value returns k as a Value.
+func (k MapKey) Value() Value {
+ return Value(k)
+}
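
As a small illustrative sketch (not part of the vendored change), the Value and MapKey documentation above translates into code roughly as follows; the package and function names are placeholders.

// Sketch: constructing and unwrapping Values and MapKeys.
// Illustrative only; not code introduced by this diff.
package valuedemo

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func demoValues() {
	v := protoreflect.ValueOf(int64(42)) // int64 corresponds to the Int64Kind row in the table above
	fmt.Println(v.Int())                 // 42
	fmt.Println(v.Interface())           // 42, unwrapped back to an int64

	k := protoreflect.ValueOf("hash").MapKey() // string is a valid map key type
	fmt.Println(k.String())                    // "hash"

	// Calling v.Bytes() here would panic with a type-mismatch message,
	// as documented for the accessor methods above.
}
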
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
new file mode 100644
index 000000000..c45debdca
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package protoreflect
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+ ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+)
+
+var (
+ nilType = typeOf(nil)
+ boolType = typeOf(*new(bool))
+ int32Type = typeOf(*new(int32))
+ int64Type = typeOf(*new(int64))
+ uint32Type = typeOf(*new(uint32))
+ uint64Type = typeOf(*new(uint64))
+ float32Type = typeOf(*new(float32))
+ float64Type = typeOf(*new(float64))
+ stringType = typeOf(*new(string))
+ bytesType = typeOf(*new([]byte))
+ enumType = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t interface{}) unsafe.Pointer {
+ return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ // typ stores the type of the value as a pointer to the Go type.
+ typ unsafe.Pointer // 8B
+
+ // ptr stores the data pointer for a String, Bytes, or interface value.
+ ptr unsafe.Pointer // 8B
+
+ // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+ // Enum value as a raw uint64.
+ //
+ // It is also used to store the length of a String or Bytes value;
+ // the capacity is ignored.
+ num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+ p := (*stringHeader)(unsafe.Pointer(&v))
+ return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+ p := (*sliceHeader)(unsafe.Pointer(&v))
+ return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfIface(v interface{}) Value {
+ p := (*ifaceHeader)(unsafe.Pointer(&v))
+ return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() (x string) {
+ *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
+ return x
+}
+func (v Value) getBytes() (x []byte) {
+ *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
+ return x
+}
+func (v Value) getIface() (x interface{}) {
+ *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+ return x
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
new file mode 100644
index 000000000..43f16c616
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -0,0 +1,768 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoregistry provides data structures to register and lookup
+// protobuf descriptor types.
+//
+// The Files registry contains file descriptors and provides the ability
+// to iterate over the files or lookup a specific descriptor within the files.
+// Files only contains protobuf descriptors and has no understanding of Go
+// type information that may be associated with each descriptor.
+//
+// The Types registry contains descriptor types for which there is a known
+// Go type associated with that descriptor. It provides the ability to iterate
+// over the registered types or lookup a type by name.
+package protoregistry
+
+import (
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// ignoreConflict reports whether to ignore a registration conflict
+// given the descriptor being registered and the error.
+// It is a variable so that the behavior is easily overridden in another file.
+var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
+ log.Printf(""+
+ "WARNING: %v\n"+
+ "A future release will panic on registration conflicts. See:\n"+
+ "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+
+ "\n", err)
+ return true
+}
+
+var globalMutex sync.RWMutex
+
+// GlobalFiles is a global registry of file descriptors.
+var GlobalFiles *Files = new(Files)
+
+// GlobalTypes is the registry used by default for type lookups
+// unless a local registry is provided by the user.
+var GlobalTypes *Types = new(Types)
+
+// NotFound is a sentinel error value to indicate that the type was not found.
+//
+// Since registry lookup can happen in the critical performance path, resolvers
+// must return this exact error value, not an error wrapping it.
+var NotFound = errors.New("not found")
+
+// Files is a registry for looking up or iterating over files and the
+// descriptors contained within them.
+// The Find and Range methods are safe for concurrent use.
+type Files struct {
+ // The map of descsByName contains:
+ // EnumDescriptor
+ // EnumValueDescriptor
+ // MessageDescriptor
+ // ExtensionDescriptor
+ // ServiceDescriptor
+ // *packageDescriptor
+ //
+ // Note that files are stored as a slice, since a package may contain
+ // multiple files. Only top-level declarations are registered.
+ // Note that enum values are in the top-level since they are in the same
+ // scope as the parent enum.
+ descsByName map[protoreflect.FullName]interface{}
+ filesByPath map[string]protoreflect.FileDescriptor
+}
+
+type packageDescriptor struct {
+ files []protoreflect.FileDescriptor
+}
+
+// RegisterFile registers the provided file descriptor.
+//
+// If any descriptor within the file conflicts with the descriptor of any
+// previously registered file (e.g., two enums with the same full name),
+// then the file is not registered and an error is returned.
+//
+// It is permitted for multiple files to have the same file path.
+func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
+ if r == GlobalFiles {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if r.descsByName == nil {
+ r.descsByName = map[protoreflect.FullName]interface{}{
+ "": &packageDescriptor{},
+ }
+ r.filesByPath = make(map[string]protoreflect.FileDescriptor)
+ }
+ path := file.Path()
+ if prev := r.filesByPath[path]; prev != nil {
+ err := errors.New("file %q is already registered", file.Path())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ switch prev := r.descsByName[name]; prev.(type) {
+ case nil, *packageDescriptor:
+ default:
+ err := errors.New("file %q has a package name conflict over %v", file.Path(), name)
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+ }
+ var err error
+ var hasConflict bool
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ if prev := r.descsByName[d.FullName()]; prev != nil {
+ hasConflict = true
+ err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(d, err) {
+ err = nil
+ }
+ }
+ })
+ if hasConflict {
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ if r.descsByName[name] == nil {
+ r.descsByName[name] = &packageDescriptor{}
+ }
+ }
+ p := r.descsByName[file.Package()].(*packageDescriptor)
+ p.files = append(p.files, file)
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ r.descsByName[d.FullName()] = d
+ })
+ r.filesByPath[path] = file
+ return nil
+}
+
+// FindDescriptorByName looks up a descriptor by the full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ prefix := name
+ suffix := nameSuffix("")
+ for prefix != "" {
+ if d, ok := r.descsByName[prefix]; ok {
+ switch d := d.(type) {
+ case protoreflect.EnumDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.EnumValueDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.MessageDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ExtensionDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ServiceDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ }
+ return nil, NotFound
+ }
+ prefix = prefix.Parent()
+ suffix = nameSuffix(name[len(prefix)+len("."):])
+ }
+ return nil, NotFound
+}
+
+func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor {
+ name := suffix.Pop()
+ if suffix == "" {
+ if ed := md.Enums().ByName(name); ed != nil {
+ return ed
+ }
+ for i := md.Enums().Len() - 1; i >= 0; i-- {
+ if vd := md.Enums().Get(i).Values().ByName(name); vd != nil {
+ return vd
+ }
+ }
+ if xd := md.Extensions().ByName(name); xd != nil {
+ return xd
+ }
+ if fd := md.Fields().ByName(name); fd != nil {
+ return fd
+ }
+ if od := md.Oneofs().ByName(name); od != nil {
+ return od
+ }
+ }
+ if md := md.Messages().ByName(name); md != nil {
+ if suffix == "" {
+ return md
+ }
+ return findDescriptorInMessage(md, suffix)
+ }
+ return nil
+}
+
+type nameSuffix string
+
+func (s *nameSuffix) Pop() (name protoreflect.Name) {
+ if i := strings.IndexByte(string(*s), '.'); i >= 0 {
+ name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:]
+ } else {
+ name, *s = protoreflect.Name((*s)), ""
+ }
+ return name
+}
+
+// FindFileByPath looks up a file by the path.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if fd, ok := r.filesByPath[path]; ok {
+ return fd, nil
+ }
+ return nil, NotFound
+}
+
+// NumFiles reports the number of registered files.
+func (r *Files) NumFiles() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.filesByPath)
+}
+
+// RangeFiles iterates over all registered files while f returns true.
+// The iteration order is undefined.
+func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, file := range r.filesByPath {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// NumFilesByPackage reports the number of registered files in a proto package.
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return 0
+ }
+ return len(p.files)
+}
+
+// RangeFilesByPackage iterates over all registered files in a given proto package
+// while f returns true. The iteration order is undefined.
+func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return
+ }
+ for _, file := range p.files {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// rangeTopLevelDescriptors iterates over all top-level descriptors in a file
+// which will be directly entered into the registry.
+func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) {
+ eds := fd.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ vds := eds.Get(i).Values()
+ for i := vds.Len() - 1; i >= 0; i-- {
+ f(vds.Get(i))
+ }
+ }
+ mds := fd.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ f(mds.Get(i))
+ }
+ xds := fd.Extensions()
+ for i := xds.Len() - 1; i >= 0; i-- {
+ f(xds.Get(i))
+ }
+ sds := fd.Services()
+ for i := sds.Len() - 1; i >= 0; i-- {
+ f(sds.Get(i))
+ }
+}
+
+// MessageTypeResolver is an interface for looking up messages.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type MessageTypeResolver interface {
+ // FindMessageByName looks up a message by its full name.
+ // E.g., "google.protobuf.Any"
+ //
+ // This returns (nil, NotFound) if not found.
+ FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error)
+
+ // FindMessageByURL looks up a message by a URL identifier.
+ // See documentation on google.protobuf.Any.type_url for the URL format.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindMessageByURL(url string) (protoreflect.MessageType, error)
+}
+
+// ExtensionTypeResolver is an interface for looking up extensions.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type ExtensionTypeResolver interface {
+ // FindExtensionByName looks up an extension field by the field's full name.
+ // Note that this is the full name of the field as determined by
+ // where the extension is declared and is unrelated to the full name of the
+ // message being extended.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+
+ // FindExtensionByNumber looks up an extension field by the field number
+ // within some parent message, identified by full name.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+}
+
+var (
+ _ MessageTypeResolver = (*Types)(nil)
+ _ ExtensionTypeResolver = (*Types)(nil)
+)
+
+// Types is a registry for looking up or iterating over descriptor types.
+// The Find and Range methods are safe for concurrent use.
+type Types struct {
+ typesByName typesByName
+ extensionsByMessage extensionsByMessage
+
+ numEnums int
+ numMessages int
+ numExtensions int
+}
+
+type (
+ typesByName map[protoreflect.FullName]interface{}
+ extensionsByMessage map[protoreflect.FullName]extensionsByNumber
+ extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType
+)
+
+// RegisterMessage registers the provided message type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterMessage(mt protoreflect.MessageType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ md := mt.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("message", md, mt); err != nil {
+ return err
+ }
+ r.numMessages++
+ return nil
+}
+
+// RegisterEnum registers the provided enum type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterEnum(et protoreflect.EnumType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ ed := et.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("enum", ed, et); err != nil {
+ return err
+ }
+ r.numEnums++
+ return nil
+}
+
+// RegisterExtension registers the provided extension type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ //
+ // A known case where this can happen: Fetching the TypeDescriptor for a
+ // legacy ExtensionDesc can consult the global registry.
+ xd := xt.TypeDescriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ field := xd.Number()
+ message := xd.ContainingMessage().FullName()
+ if prev := r.extensionsByMessage[message][field]; prev != nil {
+ err := errors.New("extension number %d is already registered on message %v", field, message)
+ err = amendErrorWithCaller(err, prev, xt)
+ if !(r == GlobalTypes && ignoreConflict(xd, err)) {
+ return err
+ }
+ }
+
+ if err := r.register("extension", xd, xt); err != nil {
+ return err
+ }
+ if r.extensionsByMessage == nil {
+ r.extensionsByMessage = make(extensionsByMessage)
+ }
+ if r.extensionsByMessage[message] == nil {
+ r.extensionsByMessage[message] = make(extensionsByNumber)
+ }
+ r.extensionsByMessage[message][field] = xt
+ r.numExtensions++
+ return nil
+}
+
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+ name := desc.FullName()
+ prev := r.typesByName[name]
+ if prev != nil {
+ err := errors.New("%v %v is already registered", kind, name)
+ err = amendErrorWithCaller(err, prev, typ)
+ if !(r == GlobalTypes && ignoreConflict(desc, err)) {
+ return err
+ }
+ }
+ if r.typesByName == nil {
+ r.typesByName = make(typesByName)
+ }
+ r.typesByName[name] = typ
+ return nil
+}
+
+// FindEnumByName looks up an enum by its full name.
+// E.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[enum]; v != nil {
+ if et, _ := v.(protoreflect.EnumType); et != nil {
+ return et, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want enum", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindMessageByName looks up a message by its full name.
+// E.g., "google.protobuf.Any"
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ // The full name by itself is a valid URL.
+ return r.FindMessageByURL(string(message))
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ message := protoreflect.FullName(url)
+ if i := strings.LastIndexByte(url, '/'); i >= 0 {
+ message = message[i+len("/"):]
+ }
+
+ if v := r.typesByName[message]; v != nil {
+ if mt, _ := v.(protoreflect.MessageType); mt != nil {
+ return mt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByName looks up an extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[field]; v != nil {
+ if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+ return xt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want extension", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByNumber looks up an extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if xt, ok := r.extensionsByMessage[message][field]; ok {
+ return xt, nil
+ }
+ return nil, NotFound
+}
+
+// NumEnums reports the number of registered enums.
+func (r *Types) NumEnums() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numEnums
+}
+
+// RangeEnums iterates over all registered enums while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if et, ok := typ.(protoreflect.EnumType); ok {
+ if !f(et) {
+ return
+ }
+ }
+ }
+}
+
+// NumMessages reports the number of registered messages.
+func (r *Types) NumMessages() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numMessages
+}
+
+// RangeMessages iterates over all registered messages while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if mt, ok := typ.(protoreflect.MessageType); ok {
+ if !f(mt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensions reports the number of registered extensions.
+func (r *Types) NumExtensions() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numExtensions
+}
+
+// RangeExtensions iterates over all registered extensions while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if xt, ok := typ.(protoreflect.ExtensionType); ok {
+ if !f(xt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensionsByMessage reports the number of registered extensions for
+// a given message type.
+func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.extensionsByMessage[message])
+}
+
+// RangeExtensionsByMessage iterates over all registered extensions filtered
+// by a given message type while f returns true. Iteration order is undefined.
+func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, xt := range r.extensionsByMessage[message] {
+ if !f(xt) {
+ return
+ }
+ }
+}
+
+func typeName(t interface{}) string {
+ switch t.(type) {
+ case protoreflect.EnumType:
+ return "enum"
+ case protoreflect.MessageType:
+ return "message"
+ case protoreflect.ExtensionType:
+ return "extension"
+ default:
+ return fmt.Sprintf("%T", t)
+ }
+}
+
+func amendErrorWithCaller(err error, prev, curr interface{}) error {
+ prevPkg := goPackage(prev)
+ currPkg := goPackage(curr)
+ if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
+ return err
+ }
+ return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
+}
+
+func goPackage(v interface{}) string {
+ switch d := v.(type) {
+ case protoreflect.EnumType:
+ v = d.Descriptor()
+ case protoreflect.MessageType:
+ v = d.Descriptor()
+ case protoreflect.ExtensionType:
+ v = d.TypeDescriptor()
+ }
+ if d, ok := v.(protoreflect.Descriptor); ok {
+ v = d.ParentFile()
+ }
+ if d, ok := v.(interface{ GoPackagePath() string }); ok {
+ return d.GoPackagePath()
+ }
+ return ""
+}
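
For orientation, a minimal lookup against the global registries described above might look like the sketch below (not part of the vendored change). It assumes some linked-in package, for example google.golang.org/protobuf/types/known/anypb, has registered google.protobuf.Any at init time; the package and function names are placeholders.

// Sketch: resolving a message type from the global registry.
// Illustrative only; not code introduced by this diff.
package registrydemo

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
)

func lookupAny() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
	if err == protoregistry.NotFound { // resolvers return this exact sentinel value
		fmt.Println("google.protobuf.Any is not registered in this binary")
		return
	}
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("found:", mt.Descriptor().FullName())
}
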
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
new file mode 100644
index 000000000..c58727675
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoiface
+
+type MessageV1 interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+type ExtensionRangeV1 struct {
+ Start, End int32 // both inclusive
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
new file mode 100644
index 000000000..32c04f67e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoiface contains types referenced or implemented by messages.
+//
+// WARNING: This package should only be imported by message implementations.
+// The functionality found in this package should be accessed through
+// higher-level abstractions provided by the proto package.
+package protoiface
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Methods is a set of optional fast-path implementations of various operations.
+type Methods = struct {
+ pragma.NoUnkeyedLiterals
+
+ // Flags indicate support for optional features.
+ Flags SupportFlags
+
+ // Size returns the size in bytes of the wire-format encoding of a message.
+ // Marshal must be provided if a custom Size is provided.
+ Size func(SizeInput) SizeOutput
+
+ // Marshal formats a message in the wire-format encoding to the provided buffer.
+ // Size should be provided if a custom Marshal is provided.
+ // It must not return an error for a partial message.
+ Marshal func(MarshalInput) (MarshalOutput, error)
+
+ // Unmarshal parses the wire-format encoding and merges the result into a message.
+ // It must not reset the target message or return an error for a partial message.
+ Unmarshal func(UnmarshalInput) (UnmarshalOutput, error)
+
+ // Merge merges the contents of a source message into a destination message.
+ Merge func(MergeInput) MergeOutput
+
+ // CheckInitialized returns an error if any required fields in the message are not set.
+ CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+}
+
+// SupportFlags indicate support for optional features.
+type SupportFlags = uint64
+
+const (
+ // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported.
+ SupportMarshalDeterministic SupportFlags = 1 << iota
+
+ // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported.
+ SupportUnmarshalDiscardUnknown
+)
+
+// SizeInput is input to the Size method.
+type SizeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Flags MarshalInputFlags
+}
+
+// SizeOutput is output from the Size method.
+type SizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Size int
+}
+
+// MarshalInput is input to the Marshal method.
+type MarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // output is appended to this buffer
+ Flags MarshalInputFlags
+}
+
+// MarshalOutput is output from the Marshal method.
+type MarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Buf []byte // contains marshaled message
+}
+
+// MarshalInputFlags configure the marshaler.
+// Most flags correspond to fields in proto.MarshalOptions.
+type MarshalInputFlags = uint8
+
+const (
+ MarshalDeterministic MarshalInputFlags = 1 << iota
+ MarshalUseCachedSize
+)
+
+// UnmarshalInput is input to the Unmarshal method.
+type UnmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // input buffer
+ Flags UnmarshalInputFlags
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// UnmarshalOutput is output from the Unmarshal method.
+type UnmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags UnmarshalOutputFlags
+}
+
+// UnmarshalInputFlags configure the unmarshaler.
+// Most flags correspond to fields in proto.UnmarshalOptions.
+type UnmarshalInputFlags = uint8
+
+const (
+ UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+)
+
+// UnmarshalOutputFlags are output from the Unmarshal method.
+type UnmarshalOutputFlags = uint8
+
+const (
+ // UnmarshalInitialized may be set on return if all required fields are known to be set.
+ // If unset, then it does not necessarily indicate that the message is uninitialized,
+ // only that its status could not be confirmed.
+ UnmarshalInitialized UnmarshalOutputFlags = 1 << iota
+)
+
+// MergeInput is input to the Merge method.
+type MergeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Source protoreflect.Message
+ Destination protoreflect.Message
+}
+
+// MergeOutput is output from the Merge method.
+type MergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags MergeOutputFlags
+}
+
+// MergeOutputFlags are output from the Merge method.
+type MergeOutputFlags = uint8
+
+const (
+ // MergeComplete reports whether the merge was performed.
+ // If unset, the merger must have made no changes to the destination.
+ MergeComplete MergeOutputFlags = 1 << iota
+)
+
+// CheckInitializedInput is input to the CheckInitialized method.
+type CheckInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+}
+
+// CheckInitializedOutput is output from the CheckInitialized method.
+type CheckInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+}
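
For orientation (not part of the vendored diff): these Methods structs are never filled in by hand; callers reach the fast paths indirectly through proto.MarshalOptions and proto.UnmarshalOptions. A minimal sketch, assuming the vendored google.golang.org/protobuf module and its durationpb well-known type:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{Seconds: 3, Nanos: 1}

	// Deterministic maps onto MarshalInputFlags/SupportMarshalDeterministic when the
	// message's fast path advertises it; otherwise the reflective path handles it.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(d)
	if err != nil {
		panic(err)
	}

	// DiscardUnknown likewise corresponds to UnmarshalDiscardUnknown.
	out := &durationpb.Duration{}
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetSeconds(), out.GetNanos()) // 3 1
}
```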
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
new file mode 100644
index 000000000..4a1ab7fb3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoimpl contains the default implementation for messages
+// generated by protoc-gen-go.
+//
+// WARNING: This package should only ever be imported by generated messages.
+// The compatibility agreement covers nothing except for functionality needed
+// to keep existing generated messages operational. Breakages that occur due
+// to unauthorized usages of this package are not the author's responsibility.
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/filetype"
+ "google.golang.org/protobuf/internal/impl"
+)
+
+// UnsafeEnabled specifies whether package unsafe can be used.
+const UnsafeEnabled = impl.UnsafeEnabled
+
+type (
+ // Types used by generated code in init functions.
+ DescBuilder = filedesc.Builder
+ TypeBuilder = filetype.Builder
+
+ // Types used by generated code to implement EnumType, MessageType, and ExtensionType.
+ EnumInfo = impl.EnumInfo
+ MessageInfo = impl.MessageInfo
+ ExtensionInfo = impl.ExtensionInfo
+
+ // Types embedded in generated messages.
+ MessageState = impl.MessageState
+ SizeCache = impl.SizeCache
+ WeakFields = impl.WeakFields
+ UnknownFields = impl.UnknownFields
+ ExtensionFields = impl.ExtensionFields
+ ExtensionFieldV1 = impl.ExtensionField
+
+ Pointer = impl.Pointer
+)
+
+var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
new file mode 100644
index 000000000..ff094e1ba
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/version"
+)
+
+const (
+ // MaxVersion is the maximum supported version for generated .pb.go files.
+ // It is always the current version of the module.
+ MaxVersion = version.Minor
+
+ // GenVersion is the runtime version required by generated .pb.go files.
+ // This is incremented when generated code relies on new functionality
+ // in the runtime.
+ GenVersion = 20
+
+ // MinVersion is the minimum supported version for generated .pb.go files.
+ // This is incremented when the runtime drops support for old code.
+ MinVersion = 0
+)
+
+// EnforceVersion is used by code generated by protoc-gen-go
+// to statically enforce minimum and maximum versions of this package.
+// A compilation failure implies either that:
+// * the runtime package is too old and needs to be updated OR
+// * the generated code is too old and needs to be regenerated.
+//
+// The runtime package can be upgraded by running:
+// go get google.golang.org/protobuf
+//
+// The generated code can be regenerated by running:
+// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
+//
+// Example usage by generated code:
+// const (
+// // Verify that this generated code is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
+// // Verify that runtime/protoimpl is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion)
+// )
+//
+// The genVersion is the current minor version used to generate the code.
+// This compile-time check relies on negative integer overflow of a uint
+// being a compilation failure (guaranteed by the Go specification).
+type EnforceVersion uint
+
+// This enforces the following invariant:
+// MinVersion ≤ GenVersion ≤ MaxVersion
+const (
+ _ = EnforceVersion(GenVersion - MinVersion)
+ _ = EnforceVersion(MaxVersion - GenVersion)
+)
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
new file mode 100644
index 000000000..5f9498e4e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -0,0 +1,287 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package anypb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+type Any struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. This string must contain at least
+ // one "/" character. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
+ //
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Any) Reset() {
+ *x = Any{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Any) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_any_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Any) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *Any) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_google_protobuf_any_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_any_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
+ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02,
+ 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_any_proto_rawDescOnce sync.Once
+ file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+)
+
+func file_google_protobuf_any_proto_rawDescGZIP() []byte {
+ file_google_protobuf_any_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ })
+ return file_google_protobuf_any_proto_rawDescData
+}
+
+var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_any_proto_goTypes = []interface{}{
+ (*Any)(nil), // 0: google.protobuf.Any
+}
+var file_google_protobuf_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_any_proto_init() }
+func file_google_protobuf_any_proto_init() {
+ if File_google_protobuf_any_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Any); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_any_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_any_proto_depIdxs,
+ MessageInfos: file_google_protobuf_any_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_any_proto = out.File
+ file_google_protobuf_any_proto_rawDesc = nil
+ file_google_protobuf_any_proto_goTypes = nil
+ file_google_protobuf_any_proto_depIdxs = nil
+}
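
A runnable counterpart to Example 4 in the doc comment above, offered as a sketch: it assumes the vendored github.com/golang/protobuf/ptypes helpers (MarshalAny/UnmarshalAny), which wrap exactly the TypeUrl and Value fields defined here.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack: the type URL becomes "type.googleapis.com/google.protobuf.Duration"
	// and Value holds the wire-format encoding of the Duration.
	d := &durpb.Duration{Seconds: 1, Nanos: 212000000}
	a, err := ptypes.MarshalAny(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl())

	// Unpack: UnmarshalAny verifies that the type URL matches the target type.
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetSeconds(), out.GetNanos()) // 1 212000000
}
```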
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
new file mode 100644
index 000000000..3997c604f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -0,0 +1,249 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package durationpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (x *Duration) Reset() {
+ *x = Duration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Duration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Duration) ProtoMessage() {}
+
+func (x *Duration) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Duration.ProtoReflect.Descriptor instead.
+func (*Duration) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Duration) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+func (x *Duration) GetNanos() int32 {
+ if x != nil {
+ return x.Nanos
+ }
+ return 0
+}
+
+var File_google_protobuf_duration_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_duration_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+ 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+ 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a,
+ 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c,
+ 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_duration_proto_rawDescOnce sync.Once
+ file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+)
+
+func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
+ file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+ })
+ return file_google_protobuf_duration_proto_rawDescData
+}
+
+var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_duration_proto_goTypes = []interface{}{
+ (*Duration)(nil), // 0: google.protobuf.Duration
+}
+var file_google_protobuf_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_duration_proto_init() }
+func file_google_protobuf_duration_proto_init() {
+ if File_google_protobuf_duration_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Duration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_duration_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_duration_proto_depIdxs,
+ MessageInfos: file_google_protobuf_duration_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_duration_proto = out.File
+ file_google_protobuf_duration_proto_rawDesc = nil
+ file_google_protobuf_duration_proto_goTypes = nil
+ file_google_protobuf_duration_proto_depIdxs = nil
+}
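
As a hedged sketch of how this type is normally consumed from Go, using the vendored github.com/golang/protobuf/ptypes conversion helpers rather than hand-written seconds/nanos arithmetic:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> protobuf Duration: 90.5s becomes {Seconds: 90, Nanos: 500000000}.
	pd := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(pd.GetSeconds(), pd.GetNanos())

	// protobuf Duration -> time.Duration; the documented seconds/nanos ranges are checked.
	gd, err := ptypes.Duration(pd)
	if err != nil {
		panic(err)
	}
	fmt.Println(gd) // 1m30.5s
}
```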
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
new file mode 100644
index 000000000..6fe6d42f1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -0,0 +1,271 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamppb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (x *Timestamp) Reset() {
+ *x = Timestamp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Timestamp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (x *Timestamp) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Timestamp) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+func (x *Timestamp) GetNanos() int32 {
+ if x != nil {
+ return x.Nanos
+ }
+ return 0
+}
+
+var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
+ 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
+ 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
+ file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+)
+
+func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
+ file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ })
+ return file_google_protobuf_timestamp_proto_rawDescData
+}
+
+var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+ (*Timestamp)(nil), // 0: google.protobuf.Timestamp
+}
+var file_google_protobuf_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_timestamp_proto_init() }
+func file_google_protobuf_timestamp_proto_init() {
+ if File_google_protobuf_timestamp_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Timestamp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_timestamp_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
+ MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_timestamp_proto = out.File
+ file_google_protobuf_timestamp_proto_rawDesc = nil
+ file_google_protobuf_timestamp_proto_goTypes = nil
+ file_google_protobuf_timestamp_proto_depIdxs = nil
+}
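
The analogous hedged sketch for Timestamp, again leaning on the vendored ptypes helpers to convert to and from time.Time:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> protobuf Timestamp (seconds and nanos since the Unix epoch, in UTC).
	now := time.Now().UTC()
	ts, err := ptypes.TimestampProto(now)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.GetSeconds(), ts.GetNanos())

	// protobuf Timestamp -> time.Time; the RFC 3339 range documented above is enforced.
	back, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Equal(now)) // true
}
```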
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 1f7e87e67..d2c2308f1 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) {
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
index 0e2e30175..204834883 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
@@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v, 0)
+ case *interface{}:
+ // Build a decoder from the given data
+ decoder := json.NewDecoder(bytes.NewBuffer(data))
+ // Preserve numbers, rather than casting to float64 automatically
+ decoder.UseNumber()
+ // Run the decode
+ if err := decoder.Decode(v); err != nil {
+ return err
+ }
+ // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+ return convertInterfaceNumbers(v, 0)
+
default:
return json.Unmarshal(data, v)
}
}
+func convertInterfaceNumbers(v *interface{}, depth int) error {
+ var err error
+ switch v2 := (*v).(type) {
+ case json.Number:
+ *v, err = convertNumber(v2)
+ case map[string]interface{}:
+ err = convertMapNumbers(v2, depth+1)
+ case []interface{}:
+ err = convertSliceNumbers(v2, depth+1)
+ }
+ return err
+}
+
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {
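
The new *interface{} case mirrors the existing map and slice cases: decoding with json.Decoder.UseNumber keeps integers as json.Number so they can later be converted to int64 instead of silently becoming float64. A standalone sketch of the effect, using only the standard library:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`9007199254740993`) // 2^53+1, not exactly representable as float64

	// Plain json.Unmarshal into interface{} yields a float64 and loses precision.
	var plain interface{}
	_ = json.Unmarshal(data, &plain)
	fmt.Printf("%T %v\n", plain, plain) // float64 9.007199254740992e+15

	// With UseNumber the value stays a json.Number, so it can be converted to
	// int64 afterwards, which is what convertInterfaceNumbers then does.
	dec := json.NewDecoder(bytes.NewBuffer(data))
	dec.UseNumber()
	var kept interface{}
	_ = dec.Decode(&kept)
	n := kept.(json.Number)
	i, _ := n.Int64()
	fmt.Printf("%T %d\n", i, i) // int64 9007199254740993
}
```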
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fac8238c6..fb05b08d3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,7 +8,8 @@ github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/archive/tar
github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
-# github.com/Microsoft/hcsshim v0.8.7
+github.com/Microsoft/go-winio/vhd
+# github.com/Microsoft/hcsshim v0.8.9
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/internal/cow
github.com/Microsoft/hcsshim/internal/hcs
@@ -33,7 +34,7 @@ github.com/VividCortex/ewma
github.com/acarl005/stripansi
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
-# github.com/blang/semver v3.1.0+incompatible
+# github.com/blang/semver v3.5.1+incompatible
github.com/blang/semver
# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/buger/goterm
@@ -42,7 +43,7 @@ github.com/checkpoint-restore/go-criu
github.com/checkpoint-restore/go-criu/rpc
# github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.3.0
+# github.com/containerd/containerd v1.3.2
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/platforms
@@ -58,10 +59,11 @@ github.com/containernetworking/cni/pkg/types/020
github.com/containernetworking/cni/pkg/types/current
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v0.8.5
+# github.com/containernetworking/plugins v0.8.6
github.com/containernetworking/plugins/pkg/ip
github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
+github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
# github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9
@@ -82,16 +84,16 @@ github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.11.1
+# github.com/containers/common v0.11.2
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
github.com/containers/common/pkg/cgroupv2
github.com/containers/common/pkg/config
github.com/containers/common/pkg/sysinfo
-# github.com/containers/conmon v2.0.14+incompatible
+# github.com/containers/conmon v2.0.16+incompatible
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.4.3
+# github.com/containers/image/v5 v5.4.4
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -106,6 +108,7 @@ github.com/containers/image/v5/internal/iolimits
github.com/containers/image/v5/internal/pkg/keyctl
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/internal/uploadreader
github.com/containers/image/v5/manifest
github.com/containers/image/v5/oci/archive
github.com/containers/image/v5/oci/internal
@@ -151,7 +154,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.19.1
+# github.com/containers/storage v1.20.1
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -200,7 +203,7 @@ github.com/coreos/go-systemd/v22/dbus
github.com/coreos/go-systemd/v22/internal/dlopen
github.com/coreos/go-systemd/v22/journal
github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
+# github.com/cri-o/ocicni v0.2.0
github.com/cri-o/ocicni/pkg/ocicni
# github.com/cyphar/filepath-securejoin v0.2.2
github.com/cyphar/filepath-securejoin
@@ -285,7 +288,7 @@ github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
github.com/gogo/protobuf/sortkeys
-# github.com/golang/protobuf v1.3.2
+# github.com/golang/protobuf v1.4.2
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
@@ -328,7 +331,7 @@ github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/pgzip v1.2.3
+# github.com/klauspost/pgzip v1.2.4
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/konsorten/go-windows-terminal-sequences
@@ -348,7 +351,13 @@ github.com/morikuni/aec
github.com/mrunalp/fileutils
# github.com/mtrmac/gpgme v0.1.2
github.com/mtrmac/gpgme
-# github.com/onsi/ginkgo v1.12.0
+# github.com/nxadm/tail v1.4.4
+github.com/nxadm/tail
+github.com/nxadm/tail/ratelimiter
+github.com/nxadm/tail/util
+github.com/nxadm/tail/watch
+github.com/nxadm/tail/winfile
+# github.com/onsi/ginkgo v1.12.2
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/extensions/table
@@ -375,7 +384,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.10.0
+# github.com/onsi/gomega v1.10.1
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
@@ -390,7 +399,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
-# github.com/opencontainers/go-digest v1.0.0-rc1
+# github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-spec/specs-go
@@ -403,7 +412,7 @@ github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/system
github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
+# github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/runtime-tools/error
@@ -468,7 +477,7 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/safchain/ethtool
-# github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
+# github.com/seccomp/containers-golang v0.4.1
github.com/seccomp/containers-golang
# github.com/seccomp/libseccomp-golang v0.9.1
github.com/seccomp/libseccomp-golang
@@ -520,7 +529,7 @@ github.com/varlink/go/varlink/idl
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v5 v5.0.3
+# github.com/vbauerster/mpb/v5 v5.0.4
github.com/vbauerster/mpb/v5
github.com/vbauerster/mpb/v5/cwriter
github.com/vbauerster/mpb/v5/decor
@@ -546,7 +555,7 @@ go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.4.0
go.uber.org/atomic
-# golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
+# golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
@@ -565,7 +574,7 @@ golang.org/x/crypto/poly1305
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/terminal
-# golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
+# golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/html
@@ -583,8 +592,9 @@ golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+# golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
golang.org/x/sys/cpu
+golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/text v0.3.2
@@ -629,6 +639,37 @@ google.golang.org/grpc/connectivity
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/status
+# google.golang.org/protobuf v1.23.0
+google.golang.org/protobuf/encoding/prototext
+google.golang.org/protobuf/encoding/protowire
+google.golang.org/protobuf/internal/descfmt
+google.golang.org/protobuf/internal/descopts
+google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/messageset
+google.golang.org/protobuf/internal/encoding/tag
+google.golang.org/protobuf/internal/encoding/text
+google.golang.org/protobuf/internal/errors
+google.golang.org/protobuf/internal/fieldnum
+google.golang.org/protobuf/internal/fieldsort
+google.golang.org/protobuf/internal/filedesc
+google.golang.org/protobuf/internal/filetype
+google.golang.org/protobuf/internal/flags
+google.golang.org/protobuf/internal/genname
+google.golang.org/protobuf/internal/impl
+google.golang.org/protobuf/internal/mapsort
+google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/set
+google.golang.org/protobuf/internal/strs
+google.golang.org/protobuf/internal/version
+google.golang.org/protobuf/proto
+google.golang.org/protobuf/reflect/protoreflect
+google.golang.org/protobuf/reflect/protoregistry
+google.golang.org/protobuf/runtime/protoiface
+google.golang.org/protobuf/runtime/protoimpl
+google.golang.org/protobuf/types/known/anypb
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/fsnotify.v1
# gopkg.in/inf.v0 v0.9.1
@@ -639,11 +680,11 @@ gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
-# gopkg.in/yaml.v2 v2.2.8
+# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
-# k8s.io/api v0.18.2
+# k8s.io/api v0.18.3
k8s.io/api/core/v1
-# k8s.io/apimachinery v0.18.2
+# k8s.io/apimachinery v0.18.3
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
diff --git a/version/version.go b/version/version.go
index fe602d8e1..4c7202e77 100644
--- a/version/version.go
+++ b/version/version.go
@@ -6,7 +6,7 @@ package version
// bumped.
const Version = "2.0.0-dev"
-// RemoteAPIVersion is the version for the remote
+// APIVersion is the version for the remote
// client API. It is used to determine compatibility
// between a remote podman client and its backend
-const RemoteAPIVersion = 1
+const APIVersion = 1
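
For context only: a hypothetical sketch of how a remote client could use the renamed constant. The import path and the way serverVersion is obtained are assumptions for illustration, not part of this change.

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/version" // module path assumed for this tree
)

func main() {
	// Hypothetical value reported by a podman service; in practice it would be
	// fetched over the remote API rather than hard-coded here.
	serverVersion := 1

	if serverVersion != version.APIVersion {
		fmt.Printf("client API v%d is incompatible with server API v%d\n",
			version.APIVersion, serverVersion)
		return
	}
	fmt.Printf("podman %s, remote API v%d\n", version.Version, version.APIVersion)
}
```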