Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 23
-rw-r--r--  vendor/github.com/containers/buildah/.golangci.yml | 7
-rw-r--r--  vendor/github.com/containers/buildah/SECURITY.md | 3
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 35
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 24
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 150
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 23
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 13
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 9
-rw-r--r--  vendor/github.com/containers/common/pkg/config/libpodConfig.go | 83
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 6
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 43
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 16
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 4
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go | 18
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go | 2
-rw-r--r--  vendor/github.com/containers/storage/store.go | 6
-rw-r--r--  vendor/github.com/containers/storage/userns.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/proto/buffer.go | 324
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go | 253
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go | 427
-rw-r--r--  vendor/github.com/golang/protobuf/proto/defaults.go | 63
-rw-r--r--  vendor/github.com/golang/protobuf/proto/deprecated.go | 126
-rw-r--r--  vendor/github.com/golang/protobuf/proto/discard.go | 356
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 203
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go | 301
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go | 771
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 965
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go | 181
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 360
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 313
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 648
-rw-r--r--  vendor/github.com/golang/protobuf/proto/proto.go | 167
-rw-r--r--  vendor/github.com/golang/protobuf/proto/registry.go | 323
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_marshal.go | 2776
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_merge.go | 654
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_unmarshal.go | 2053
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 843
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_decode.go | 801
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_encode.go | 560
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 880
-rw-r--r--  vendor/github.com/golang/protobuf/proto/wire.go | 78
-rw-r--r--  vendor/github.com/golang/protobuf/proto/wrappers.go | 34
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any.go | 214
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 230
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.proto | 154
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/doc.go | 37
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration.go | 116
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go | 192
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.proto | 117
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp.go | 109
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go | 211
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto | 135
-rw-r--r--  vendor/github.com/klauspost/pgzip/LICENSE | 3
-rw-r--r--  vendor/github.com/klauspost/pgzip/gzip.go | 2
-rw-r--r--  vendor/github.com/nxadm/tail/.gitignore | 2
-rw-r--r--  vendor/github.com/nxadm/tail/.travis.yml | 16
-rw-r--r--  vendor/github.com/nxadm/tail/CHANGES.md | 46
-rw-r--r--  vendor/github.com/nxadm/tail/Dockerfile | 19
-rw-r--r--  vendor/github.com/nxadm/tail/LICENSE | 21
-rw-r--r--  vendor/github.com/nxadm/tail/README.md | 36
-rw-r--r--  vendor/github.com/nxadm/tail/appveyor.yml | 11
-rw-r--r--  vendor/github.com/nxadm/tail/go.mod | 9
-rw-r--r--  vendor/github.com/nxadm/tail/go.sum | 6
-rw-r--r--  vendor/github.com/nxadm/tail/ratelimiter/Licence | 7
-rw-r--r--  vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go | 97
-rw-r--r--  vendor/github.com/nxadm/tail/ratelimiter/memory.go | 60
-rw-r--r--  vendor/github.com/nxadm/tail/ratelimiter/storage.go | 6
-rw-r--r--  vendor/github.com/nxadm/tail/tail.go | 440
-rw-r--r--  vendor/github.com/nxadm/tail/tail_posix.go | 11
-rw-r--r--  vendor/github.com/nxadm/tail/tail_windows.go | 12
-rw-r--r--  vendor/github.com/nxadm/tail/util/util.go | 48
-rw-r--r--  vendor/github.com/nxadm/tail/watch/filechanges.go | 36
-rw-r--r--  vendor/github.com/nxadm/tail/watch/inotify.go | 135
-rw-r--r--  vendor/github.com/nxadm/tail/watch/inotify_tracker.go | 248
-rw-r--r--  vendor/github.com/nxadm/tail/watch/polling.go | 118
-rw-r--r--  vendor/github.com/nxadm/tail/watch/watch.go | 20
-rw-r--r--  vendor/github.com/nxadm/tail/winfile/winfile.go | 92
-rw-r--r--  vendor/github.com/onsi/ginkgo/.travis.yml | 2
-rw-r--r--  vendor/github.com/onsi/ginkgo/CHANGELOG.md | 26
-rw-r--r--  vendor/github.com/onsi/ginkgo/README.md | 52
-rw-r--r--  vendor/github.com/onsi/ginkgo/config/config.go | 2
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/build_command.go | 2
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go | 8
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go | 5
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/run_command.go | 15
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go | 8
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go | 173
-rw-r--r--  vendor/github.com/onsi/ginkgo/go.mod | 10
-rw-r--r--  vendor/github.com/onsi/ginkgo/go.sum | 41
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go | 6
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go | 13
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go | 11
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go | 11
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go | 9
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go | 12
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/suite/suite.go | 3
-rw-r--r--  vendor/github.com/onsi/ginkgo/reporters/default_reporter.go | 6
-rw-r--r--  vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go | 12
-rw-r--r--  vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go | 12
-rw-r--r--  vendor/github.com/onsi/gomega/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/onsi/gomega/go.mod | 18
-rw-r--r--  vendor/github.com/onsi/gomega/go.sum | 38
-rw-r--r--  vendor/github.com/onsi/gomega/gomega_dsl.go | 2
-rw-r--r--  vendor/github.com/opencontainers/go-digest/.mailmap | 3
-rw-r--r--  vendor/github.com/opencontainers/go-digest/.pullapprove.yml | 38
-rw-r--r--  vendor/github.com/opencontainers/go-digest/.travis.yml | 3
-rw-r--r--  vendor/github.com/opencontainers/go-digest/LICENSE (renamed from vendor/github.com/opencontainers/go-digest/LICENSE.code) | 1
-rw-r--r--  vendor/github.com/opencontainers/go-digest/MAINTAINERS | 12
-rw-r--r--  vendor/github.com/opencontainers/go-digest/README.md | 70
-rw-r--r--  vendor/github.com/opencontainers/go-digest/algorithm.go | 1
-rw-r--r--  vendor/github.com/opencontainers/go-digest/digest.go | 1
-rw-r--r--  vendor/github.com/opencontainers/go-digest/digester.go | 1
-rw-r--r--  vendor/github.com/opencontainers/go-digest/doc.go | 10
-rw-r--r--  vendor/github.com/opencontainers/go-digest/go.mod | 3
-rw-r--r--  vendor/github.com/opencontainers/go-digest/verifiers.go | 1
-rw-r--r--  vendor/github.com/opencontainers/runtime-spec/specs-go/config.go | 55
-rw-r--r--  vendor/github.com/opencontainers/runtime-spec/specs-go/version.go | 2
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml | 164
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml | 328
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml | 101
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml | 98
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml | 219
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml | 123
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml | 366
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml | 70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml | 100
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml | 76
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml | 144
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml | 221
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml | 55
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml | 141
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml | 661
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml | 63
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml | 88
-rw-r--r--  vendor/github.com/openshift/api/config/v1/doc.go | 8
-rw-r--r--  vendor/github.com/openshift/api/config/v1/register.go | 70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/stringsource.go | 31
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types.go | 312
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_apiserver.go | 118
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_authentication.go | 118
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_build.go | 109
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_operator.go | 184
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_version.go | 267
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_console.go | 62
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_dns.go | 87
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_feature.go | 194
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_image.go | 115
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_infrastructure.go | 241
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_ingress.go | 46
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_network.go | 122
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_oauth.go | 557
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_operatorhub.go | 78
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_project.go | 54
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_proxy.go | 90
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_scheduling.go | 74
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go | 260
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go | 3365
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go | 1292
-rw-r--r--  vendor/github.com/seccomp/containers-golang/.gitignore | 4
-rw-r--r--  vendor/github.com/seccomp/containers-golang/LICENSE (renamed from vendor/github.com/openshift/api/LICENSE) | 13
-rw-r--r--  vendor/github.com/seccomp/containers-golang/Makefile | 22
-rw-r--r--  vendor/github.com/seccomp/containers-golang/README.md | 12
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.mod | 16
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.sum | 48
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp.json | 45
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go | 36
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_linux.go | 46
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go | 19
-rw-r--r--  vendor/github.com/seccomp/containers-golang/types.go | 5
-rw-r--r--  vendor/github.com/seccomp/containers-golang/vendor.conf | 9
184 files changed, 5756 insertions, 24133 deletions
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 8453cdc38..cb16fa89c 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -27,11 +27,13 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
- # TODO: Setting up from base-images is very inefficient, use libpod's cache-images instead?
- FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1565360543"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1565360543"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190724"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190722a"
+ # See https://github.com/containers/libpod/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task
+ _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-32-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"
+
####
#### Command variables to help avoid duplication
@@ -153,9 +155,6 @@ gce_instance:
- 'cirrus-ci/only_prs/gate'
- 'cirrus-ci/only_prs/vendor'
- container:
- image: registry.fedoraproject.org/fedora:30
-
env:
matrix:
CROSS_TARGET: darwin
@@ -179,10 +178,8 @@ gce_instance:
gce_instance: # Only need to specify differences from defaults (above)
matrix: # Duplicate this task for each matrix product.
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
- # TODO: Re-enable once prior image is F30 and above is F31
- # image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
- # TODO: Re-enable when package repositories functional
- #image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+ image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
# Separate scripts for separate outputs, makes debugging easier.
@@ -248,7 +245,7 @@ gce_instance:
CIRRUS_CLONE_DEPTH: 1 # no code is being used by this task
container:
- image: "registry.fedoraproject.org/fedora-minimal:latest"
+ image: "quay.io/libpod/fedora-minimal:latest"
cpu: 1
memory: 1
diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml
index dde37ad79..888d89afa 100644
--- a/vendor/github.com/containers/buildah/.golangci.yml
+++ b/vendor/github.com/containers/buildah/.golangci.yml
@@ -4,8 +4,8 @@ run:
- apparmor
- seccomp
- selinux
- concurrency: 6
- deadline: 5m
+ # Don't exceed the number of threads available when running under CI
+ concurrency: 4
linters:
disable-all: true
enable:
@@ -17,7 +17,8 @@ linters:
- gofmt
- goimports
- golint
- - gosimple
+ # Broken? Unpredictably dies w/o any error well before deadline/timeout expires
+ # - gosimple
- govet
- ineffassign
- interfacer
diff --git a/vendor/github.com/containers/buildah/SECURITY.md b/vendor/github.com/containers/buildah/SECURITY.md
new file mode 100644
index 000000000..0184bd22d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the Buildah Project
+
+The Buildah Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 2ece11acd..556506e4a 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -413,6 +414,9 @@ type BuilderOptions struct {
MaxPullRetries int
// PullRetryDelay is how long to wait before retrying a pull attempt.
PullRetryDelay time.Duration
+ // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image
+ // if it is encrypted. If nil, no attempt is made to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
// ImportOptions are used to initialize a Builder from an existing container
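For context, the hunk above only threads the new OciDecryptConfig field through BuilderOptions; the diff does not show how a caller would fill it in. A minimal sketch, assuming ocicrypt's config constructors behave as in the v1.0.2 vendored here, and using a hypothetical unencrypted "key.pem":

```go
// Sketch only: wiring an ocicrypt DecryptConfig into BuilderOptions so an
// encrypted source image can be decrypted during the pull. "key.pem" is a
// hypothetical PEM-encoded private key with no passphrase.
package example

import (
	"context"
	"io/ioutil"

	"github.com/containers/buildah"
	encconfig "github.com/containers/ocicrypt/config"
	"github.com/containers/storage"
)

func newDecryptingBuilder(ctx context.Context, store storage.Store) (*buildah.Builder, error) {
	key, err := ioutil.ReadFile("key.pem")
	if err != nil {
		return nil, err
	}
	// DecryptWithPrivKeys returns a CryptoConfig; its DecryptConfig half is
	// what the new BuilderOptions field expects. A nil password entry marks
	// the key as unencrypted.
	cc, err := encconfig.DecryptWithPrivKeys([][]byte{key}, [][]byte{nil})
	if err != nil {
		return nil, err
	}
	opts := buildah.BuilderOptions{
		FromImage:        "registry.example.com/app:encrypted", // hypothetical image
		OciDecryptConfig: cc.DecryptConfig,
	}
	return buildah.NewBuilder(ctx, store, opts)
}
```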
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index d25ba110a..6c3febd5d 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -19,11 +19,11 @@ import (
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
- configv1 "github.com/openshift/api/config/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -88,6 +88,15 @@ type CommitOptions struct {
// RetryDelay is how long to wait before retrying a commit attempt to a
// registry.
RetryDelay time.Duration
+ // OciEncryptConfig, when non-nil, indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of the EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // Integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing: 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
}
// PushOptions can be used to alter how an image is copied somewhere.
@@ -132,6 +141,15 @@ type PushOptions struct {
MaxRetries int
// RetryDelay is how long to wait before retrying a push attempt.
RetryDelay time.Duration
+ // OciEncryptConfig, when non-nil, indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of the EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // Integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing: 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
}
var (
@@ -162,7 +180,12 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error
}
if registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES"); ok && len(registrySources) > 0 {
- var sources configv1.RegistrySources
+ // Use a local struct instead of the github.com/openshift/api/config/v1 RegistrySources type
+ var sources struct {
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ }
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
return errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
}
@@ -270,7 +293,9 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Check if the base image is already in the destination and it's some kind of local
// storage. If so, we can skip recompressing any layers that come from the base image.
exportBaseLayers := true
- if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && b.FromImageID != "" {
+ if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && options.OciEncryptConfig != nil {
+ return imgID, nil, "", errors.New("unable to use local storage with image encryption")
+ } else if destIsStorage && b.FromImageID != "" {
if baseref, err := transport.ParseReference(b.FromImageID); baseref != nil && err == nil {
if img, err := transport.GetImage(baseref); img != nil && err == nil {
logrus.Debugf("base image %q is already present in local storage, no need to copy its layers", b.FromImageID)
@@ -319,7 +344,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
// If we've got more names to attach, and we know how to do that for
@@ -451,7 +476,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
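The checkRegistrySourcesAllows change above swaps the openshift/api type for a local struct, so the only remaining contract is the JSON shape of $BUILD_REGISTRY_SOURCES. A self-contained sketch, mirroring the struct introduced in the diff, with illustrative registry names:

```go
// Sketch of the JSON payload checkRegistrySourcesAllows parses from
// $BUILD_REGISTRY_SOURCES after this change. The registry names here are
// made up for illustration.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := `{"allowedRegistries":["quay.io","registry.fedoraproject.org"],"blockedRegistries":["docker.io"]}`
	var sources struct {
		InsecureRegistries []string `json:"insecureRegistries,omitempty"`
		BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
		AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
	}
	if err := json.Unmarshal([]byte(payload), &sources); err != nil {
		panic(err)
	}
	fmt.Println("allowed:", sources.AllowedRegistries, "blocked:", sources.BlockedRegistries)
}
```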
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 8fb3ebdb7..b43cfffc9 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
"github.com/docker/distribution/registry/api/errcode"
@@ -30,7 +31,7 @@ const (
DOCKER = "docker"
)
-func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string) *cp.Options {
+func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options {
sourceCtx := getSystemContext(store, nil, "")
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
@@ -47,6 +48,9 @@ func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemCon
ForceManifestMIMEType: manifestType,
RemoveSignatures: removeSignatures,
SignBy: addSigner,
+ OciEncryptConfig: ociEncryptConfig,
+ OciDecryptConfig: ociDecryptConfig,
+ OciEncryptLayers: ociEncryptLayers,
}
}
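With getCopyOptions now forwarding the encryption settings into the image copy, a commit can opt layers into encryption. A sketch of filling in CommitOptions, assuming a hypothetical "recipient.pem" public key and that ocicrypt's EncryptWithJwe constructor works as in the v1.0.2 vendored here:

```go
// Sketch only: building CommitOptions that request layer encryption.
// EncryptWithJwe comes from the same ocicrypt config package; its
// CryptoConfig carries the EncryptConfig half that CommitOptions expects.
package example

import (
	"io/ioutil"

	"github.com/containers/buildah"
	encconfig "github.com/containers/ocicrypt/config"
)

func encryptingCommitOptions() (buildah.CommitOptions, error) {
	pub, err := ioutil.ReadFile("recipient.pem") // hypothetical recipient public key
	if err != nil {
		return buildah.CommitOptions{}, err
	}
	cc, err := encconfig.EncryptWithJwe([][]byte{pub})
	if err != nil {
		return buildah.CommitOptions{}, err
	}
	all := []int{} // non-nil with len==0: encrypt every layer
	// []int{-1} instead would encrypt only the last (top-most) layer.
	return buildah.CommitOptions{
		OciEncryptConfig: cc.EncryptConfig,
		OciEncryptLayers: &all,
	}, nil
}
```

Note the matching guard added in Commit above: when the destination is local containers-storage, encryption is rejected with "unable to use local storage with image encryption".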
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 472603e52..2d50e1e48 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,9 +4,10 @@ go 1.12
require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.10.0
- github.com/containers/image/v5 v5.4.3
- github.com/containers/storage v1.19.0
+ github.com/containers/common v0.11.2
+ github.com/containers/image/v5 v5.4.4
+ github.com/containers/ocicrypt v1.0.2
+ github.com/containers/storage v1.19.2
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
@@ -17,27 +18,26 @@ require (
github.com/hashicorp/go-multierror v1.0.0
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/mattn/go-shellwords v1.0.10
- github.com/onsi/ginkgo v1.12.0
- github.com/onsi/gomega v1.9.0
- github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/onsi/ginkgo v1.12.1
+ github.com/onsi/gomega v1.10.0
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc9
- github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.5.1
- github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
github.com/openshift/imagebuilder v1.1.4
github.com/pkg/errors v0.9.1
- github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
+ github.com/seccomp/containers-golang v0.4.1
github.com/seccomp/libseccomp-golang v0.9.1
- github.com/sirupsen/logrus v1.5.0
+ github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.5.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/vishvananda/netlink v1.1.0 // indirect
- golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
- golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
)
replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 5612c03db..c66b8256b 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -6,18 +6,14 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -31,6 +27,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -41,6 +39,8 @@ github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtM
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
@@ -50,17 +50,20 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.10.0 h1:Km1foMJJBIxceA1/UCZcIuwf8sCF71sP5DwE6Oh1BEA=
-github.com/containers/common v0.10.0/go.mod h1:6A/moCuQITXLqBe5A0WKKTcCfCmEQRbknI05HcPzOL0=
-github.com/containers/image/v5 v5.4.3 h1:zn2HR7uu4hpvT5QQHgjqonOzKDuM1I1UHUEmzZT5sbs=
+github.com/containers/common v0.11.2 h1:e4477fCE3qSA+Z2vT+uUMUTn8s8CyIM++qNm3PCSl68=
+github.com/containers/common v0.11.2/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
+github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
+github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/storage v1.18.2/go.mod h1:WTBMf+a9ZZ/LbmEVeLHH2TX4CikWbO1Bt+/m58ZHVPg=
-github.com/containers/storage v1.19.0 h1:bVIF5EglbT5PQnqcN7sE6VWqoQzlToqzjXdz+eNubQg=
-github.com/containers/storage v1.19.0/go.mod h1:9Xc4rrTubn5hmtBfL+PSJH1XlfTQwR4VAG1NDUIpCts=
+github.com/containers/storage v1.19.1 h1:YKIzOO12iaD5Ra0PKFS6emcygbHLmwmQOCQRU/19YAQ=
+github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
+github.com/containers/storage v1.19.2 h1:vhcUwEjDZiPJxaLPFsjvyavnEjFw6qQi9HAkVz1amfI=
+github.com/containers/storage v1.19.2/go.mod h1:gYCp3jzgXkvubO0rI14QAjz5Mxm/qKJgLmHFyqayDnw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -69,7 +72,6 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -92,53 +94,32 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehP
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
@@ -148,11 +129,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -177,20 +155,18 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
-github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
+github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -198,14 +174,9 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@@ -217,7 +188,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -225,23 +195,25 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.0 h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=
+github.com/onsi/gomega v1.10.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -252,14 +224,15 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
github.com/openshift/imagebuilder v1.1.4 h1:LUg8aTjyXMtlDx6IbtvaqofFGZ6aYqe+VIeATE735LM=
github.com/openshift/imagebuilder v1.1.4/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
@@ -270,7 +243,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
@@ -290,6 +262,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -297,13 +270,14 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f h1:OtU/w6sBKmXYaw2KEODxjcYi3oPSyyslhgGFgIJVGAI=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
+github.com/seccomp/containers-golang v0.4.1 h1:6hsmsP8Y9T6PWKJELqAkRWkc6Te60+zK64avkjInd44=
+github.com/seccomp/containers-golang v0.4.1/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -315,7 +289,6 @@ github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKv
github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -323,8 +296,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -342,8 +313,9 @@ github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.0.3 h1:Ldt/azOkbThTk2loi6FrBd/3fhxGFQ24MxFAS88PoNY=
github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y=
+github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
+github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
@@ -368,21 +340,16 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -394,9 +361,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -408,31 +373,29 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -441,32 +404,26 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqG
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -479,8 +436,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -496,23 +451,4 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
-k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo=
-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 94e97d870..57d8ecb93 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -586,16 +586,10 @@ func (i *containerImageSource) Reference() types.ImageReference {
}
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, errors.Errorf("containerImageSource does not support manifest lists")
- }
return nil, nil
}
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- return nil, "", errors.Errorf("containerImageSource does not support manifest lists")
- }
return i.manifest, i.manifestType, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 8d30367c1..a9e0641c0 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -3,6 +3,7 @@ package imagebuildah
import (
"bytes"
"context"
+ "fmt"
"io"
"io/ioutil"
"net/http"
@@ -16,10 +17,12 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -171,6 +174,9 @@ type BuildOptions struct {
MaxPullPushRetries int
// PullPushRetryDelay is how long to wait before retrying a pull or push attempt.
PullPushRetryDelay time.Duration
+	// OciDecryptConfig, if non-nil, holds the config used to decrypt an image
+	// when it is encrypted. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
@@ -249,6 +255,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
}
+
+ warnOnUnsetBuildArgs(mainNode, options.Args)
+
for _, d := range dockerfiles[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(d)
if err != nil {
@@ -280,6 +289,20 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return exec.Build(ctx, stages)
}
+func warnOnUnsetBuildArgs(node *parser.Node, args map[string]string) {
+ for _, child := range node.Children {
+ switch strings.ToUpper(child.Value) {
+ case "ARG":
+ argName := child.Next.Value
+ if _, ok := args[argName]; !strings.Contains(argName, "=") && !ok {
+ logrus.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
+ }
+ default:
+ continue
+ }
+ }
+}
+
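The new warnOnUnsetBuildArgs walks the top-level Dockerfile AST and flags every ARG whose token carries no "=" default and has no entry in the supplied args map. A standalone sketch of the same check (a hypothetical example, not part of the vendored code; it assumes imagebuilder.ParseDockerfile accepts an io.Reader, as the call sites above suggest):

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	dockerfile := "FROM alpine\nARG VERSION\nARG CHANNEL=stable\n"
	node, err := imagebuilder.ParseDockerfile(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}
	args := map[string]string{} // nothing supplied via --build-arg
	for _, child := range node.Children {
		if strings.ToUpper(child.Value) != "ARG" {
			continue
		}
		name := child.Next.Value
		// "CHANNEL=stable" has a default and is skipped by the Contains
		// test; "VERSION" has neither a default nor a supplied value.
		if _, ok := args[name]; !strings.Contains(name, "=") && !ok {
			fmt.Printf("missing %q build argument\n", name)
		}
	}
}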
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 02123c822..a156313df 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -100,6 +101,7 @@ type Executor struct {
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
+ ociDecryptConfig *encconfig.DecryptConfig
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -188,6 +190,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
os: options.OS,
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
+ ociDecryptConfig: options.OciDecryptConfig,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -233,7 +236,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
-func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, from, output string) *StageExecutor {
+func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, output string) *StageExecutor {
if b.stages == nil {
b.stages = make(map[string]*StageExecutor)
}
@@ -248,7 +251,6 @@ func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, from, outpu
stage: stage,
}
b.stages[stage.Name] = stageExec
- b.stages[from] = stageExec
if idx := strconv.Itoa(stage.Position); idx != stage.Name {
b.stages[idx] = stageExec
}
@@ -421,7 +423,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
output = b.output
}
- stageExecutor := b.startStage(&stage, len(stages), base, output)
+ stageExecutor := b.startStage(&stage, len(stages), output)
// If this a single-layer build, or if it's a multi-layered
// build and b.forceRmIntermediateCtrs is set, make sure we
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 5ab70e54c..7ba5e2e96 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -295,7 +295,7 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []st
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
- if other, ok := s.executor.stages[from]; ok {
+ if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[from]; ok {
@@ -633,6 +633,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
Devices: s.executor.devices,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
+ OciDecryptConfig: s.executor.ociDecryptConfig,
}
// Check and see if the image is a pseudonym for the end result of a
@@ -868,13 +869,10 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
- otherStage, ok := s.executor.stages[arr[1]]
- if !ok {
- if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
- }
- } else {
+ if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
mountPoint = otherStage.mountPoint
+ } else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
}
s.copyFrom = mountPoint
break
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 160c2157d..4f4b1564b 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -30,12 +30,13 @@ const (
func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
pullOptions := PullOptions{
- ReportWriter: options.ReportWriter,
- Store: store,
- SystemContext: options.SystemContext,
- BlobDirectory: options.BlobDirectory,
- MaxRetries: options.MaxPullRetries,
- RetryDelay: options.PullRetryDelay,
+ ReportWriter: options.ReportWriter,
+ Store: store,
+ SystemContext: options.SystemContext,
+ BlobDirectory: options.BlobDirectory,
+ MaxRetries: options.MaxPullRetries,
+ RetryDelay: options.PullRetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
}
ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 8e7f7dd4a..1a457f34c 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -57,6 +57,7 @@ type BudResults struct {
Creds string
DisableCompression bool
DisableContentTrust bool
+ DecryptionKeys []string
File []string
Format string
Iidfile string
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index cbb98cbcf..f8d4bdeb6 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -19,6 +19,7 @@ import (
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
@@ -56,6 +57,9 @@ type PullOptions struct {
MaxRetries int
// RetryDelay is how long to wait before retrying a pull attempt.
RetryDelay time.Duration
+	// OciDecryptConfig, if non-nil, holds the config used to decrypt an image
+	// when it is encrypted. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
@@ -164,6 +168,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
ReportWriter: options.ReportWriter,
MaxPullRetries: options.MaxRetries,
PullRetryDelay: options.RetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
}
storageRef, transport, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
@@ -275,7 +280,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, ""), options.MaxRetries, options.RetryDelay); err != nil {
+ if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index a358b7c54..f41daa2cc 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -74,7 +74,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
return []string{strings.TrimPrefix(name, DefaultTransport)}, DefaultTransport, false, nil
}
split := strings.SplitN(name, ":", 2)
- if len(split) == 2 {
+ if StartsWithValidTransport(name) && len(split) == 2 {
if trans := transports.Get(split[0]); trans != nil {
return []string{split[1]}, trans.Name(), false, nil
}
@@ -148,6 +148,12 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
+// StartsWithValidTransport validates that the name starts with a Buildah-supported transport,
+// to avoid the corner case where an image name is the same as a transport name
+func StartsWithValidTransport(name string) bool {
+ return strings.HasPrefix(name, "dir:") || strings.HasPrefix(name, "docker://") || strings.HasPrefix(name, "docker-archive:") || strings.HasPrefix(name, "docker-daemon:") || strings.HasPrefix(name, "oci:") || strings.HasPrefix(name, "oci-archive:")
+}
+
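StartsWithValidTransport exists because strings.SplitN(name, ":", 2) alone cannot distinguish an explicit transport from an image whose name or tag merely contains a colon. A small sketch of the distinction, inlining the same prefix list (hypothetical standalone code):

package main

import (
	"fmt"
	"strings"
)

func startsWithValidTransport(name string) bool {
	for _, p := range []string{"dir:", "docker://", "docker-archive:", "docker-daemon:", "oci:", "oci-archive:"} {
		if strings.HasPrefix(name, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(startsWithValidTransport("docker://alpine")) // true: explicit transport
	fmt.Println(startsWithValidTransport("docker:19.03"))    // false: image "docker", tag "19.03"
}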
// ExpandNames takes unqualified names, parses them as image names, and returns
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 7debd2984..185ce8cee 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -476,10 +476,11 @@ func (c *Config) Ulimits() []string {
// PidsLimit returns the default maximum number of pids to use in containers
func (c *Config) PidsLimit() int64 {
if unshare.IsRootless() {
- cgroup2, _ := cgroupv2.Enabled()
- if cgroup2 {
- return c.Containers.PidsLimit
- } else {
+ if c.Engine.CgroupManager == SystemdCgroupsManager {
+ cgroup2, _ := cgroupv2.Enabled()
+ if cgroup2 {
+ return c.Containers.PidsLimit
+ }
return 0
}
}
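The rewritten PidsLimit honors a rootless pids limit only when systemd manages cgroups on a cgroup-v2 host. A condensed sketch of the decision (a hypothetical helper; the real code probes cgroupv2.Enabled(), and the unseen tail of the function is assumed to return the configured limit for the remaining paths):

// rootless + systemd + cgroup v2 -> configured limit (controller can be delegated)
// rootless + systemd + cgroup v1 -> 0 (limit cannot be enforced rootless)
// any other combination          -> configured limit
func pidsLimit(rootless, systemdManager, cgroupV2 bool, configured int64) int64 {
	if rootless && systemdManager {
		if cgroupV2 {
			return configured
		}
		return 0
	}
	return configured
}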
diff --git a/vendor/github.com/containers/common/pkg/config/libpodConfig.go b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
index 89566f789..a8e4c9c93 100644
--- a/vendor/github.com/containers/common/pkg/config/libpodConfig.go
+++ b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
@@ -246,20 +246,6 @@ func readLibpodConfigFromFile(path string, config *ConfigFromLibpod) (*ConfigFro
return nil, fmt.Errorf("unable to decode configuration %v: %v", path, err)
}
- // For the sake of backwards compat we need to check if the config fields
- // with *Set suffix are set in the config. Note that the storage-related
- // fields are NOT set in the config here but in the storage.conf OR directly
- // by the user.
- if config.VolumePath != "" {
- config.VolumePathSet = true
- }
- if config.StaticDir != "" {
- config.StaticDirSet = true
- }
- if config.TmpDir != "" {
- config.TmpDirSet = true
- }
-
return config, err
}
@@ -350,40 +336,75 @@ func (c *Config) libpodConfig() *ConfigFromLibpod {
func (c *Config) libpodToContainersConfig(libpodConf *ConfigFromLibpod) {
- c.Containers.InitPath = libpodConf.InitPath
+ if libpodConf.InitPath != "" {
+ c.Containers.InitPath = libpodConf.InitPath
+ }
c.Containers.LogSizeMax = libpodConf.MaxLogSize
c.Containers.EnableLabeling = libpodConf.EnableLabeling
- c.Engine.SignaturePolicyPath = libpodConf.SignaturePolicyPath
+ if libpodConf.SignaturePolicyPath != "" {
+ c.Engine.SignaturePolicyPath = libpodConf.SignaturePolicyPath
+ }
c.Engine.SetOptions = libpodConf.SetOptions
- c.Engine.VolumePath = libpodConf.VolumePath
- c.Engine.ImageDefaultTransport = libpodConf.ImageDefaultTransport
- c.Engine.OCIRuntime = libpodConf.OCIRuntime
+ if libpodConf.VolumePath != "" {
+ c.Engine.VolumePath = libpodConf.VolumePath
+ }
+ if libpodConf.ImageDefaultTransport != "" {
+ c.Engine.ImageDefaultTransport = libpodConf.ImageDefaultTransport
+ }
+ if libpodConf.OCIRuntime != "" {
+ c.Engine.OCIRuntime = libpodConf.OCIRuntime
+ }
c.Engine.OCIRuntimes = libpodConf.OCIRuntimes
c.Engine.RuntimeSupportsJSON = libpodConf.RuntimeSupportsJSON
c.Engine.RuntimeSupportsNoCgroups = libpodConf.RuntimeSupportsNoCgroups
c.Engine.RuntimePath = libpodConf.RuntimePath
c.Engine.ConmonPath = libpodConf.ConmonPath
c.Engine.ConmonEnvVars = libpodConf.ConmonEnvVars
- c.Engine.CgroupManager = libpodConf.CgroupManager
- c.Engine.StaticDir = libpodConf.StaticDir
- c.Engine.TmpDir = libpodConf.TmpDir
+ if libpodConf.CgroupManager != "" {
+ c.Engine.CgroupManager = libpodConf.CgroupManager
+ }
+ if libpodConf.StaticDir != "" {
+ c.Engine.StaticDir = libpodConf.StaticDir
+ }
+ if libpodConf.TmpDir != "" {
+ c.Engine.TmpDir = libpodConf.TmpDir
+ }
c.Engine.NoPivotRoot = libpodConf.NoPivotRoot
c.Engine.HooksDir = libpodConf.HooksDir
- c.Engine.Namespace = libpodConf.Namespace
- c.Engine.InfraImage = libpodConf.InfraImage
- c.Engine.InfraCommand = libpodConf.InfraCommand
+ if libpodConf.Namespace != "" {
+ c.Engine.Namespace = libpodConf.Namespace
+ }
+ if libpodConf.InfraImage != "" {
+ c.Engine.InfraImage = libpodConf.InfraImage
+ }
+ if libpodConf.InfraCommand != "" {
+ c.Engine.InfraCommand = libpodConf.InfraCommand
+ }
+
c.Engine.EnablePortReservation = libpodConf.EnablePortReservation
- c.Engine.NetworkCmdPath = libpodConf.NetworkCmdPath
+ if libpodConf.NetworkCmdPath != "" {
+ c.Engine.NetworkCmdPath = libpodConf.NetworkCmdPath
+ }
c.Engine.NumLocks = libpodConf.NumLocks
c.Engine.LockType = libpodConf.LockType
- c.Engine.EventsLogger = libpodConf.EventsLogger
- c.Engine.EventsLogFilePath = libpodConf.EventsLogFilePath
- c.Engine.DetachKeys = libpodConf.DetachKeys
+ if libpodConf.EventsLogger != "" {
+ c.Engine.EventsLogger = libpodConf.EventsLogger
+ }
+ if libpodConf.EventsLogFilePath != "" {
+ c.Engine.EventsLogFilePath = libpodConf.EventsLogFilePath
+ }
+ if libpodConf.DetachKeys != "" {
+ c.Engine.DetachKeys = libpodConf.DetachKeys
+ }
c.Engine.SDNotify = libpodConf.SDNotify
c.Engine.CgroupCheck = libpodConf.CgroupCheck
- c.Network.NetworkConfigDir = libpodConf.CNIConfigDir
+ if libpodConf.CNIConfigDir != "" {
+ c.Network.NetworkConfigDir = libpodConf.CNIConfigDir
+ }
c.Network.CNIPluginDirs = libpodConf.CNIPluginDir
- c.Network.DefaultNetwork = libpodConf.CNIDefaultNetwork
+ if libpodConf.CNIDefaultNetwork != "" {
+ c.Network.DefaultNetwork = libpodConf.CNIDefaultNetwork
+ }
}
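Every change in this hunk applies the same guard: a libpod value overrides the containers.conf default only when it is non-empty, so zero-valued fields from a sparse libpod.conf no longer clobber defaults. The recurring pattern, factored into a helper for illustration (a sketch only; the vendored code deliberately keeps the checks inline):

// overrideIfSet copies src over *dst only when src is non-empty, mirroring
// the `if libpodConf.X != "" { c.Y = libpodConf.X }` guards above.
func overrideIfSet(dst *string, src string) {
	if src != "" {
		*dst = src
	}
}

// e.g. overrideIfSet(&c.Engine.OCIRuntime, libpodConf.OCIRuntime)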
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 836ae4eda..0044d6cb9 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.19.2
+1.20.1
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index d0c7fab0a..f1c941f11 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -272,7 +272,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
if err != nil {
return "", err
}
- if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil {
return "", err
}
@@ -1701,10 +1701,10 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
if err != nil {
return err
}
- if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
return err
}
- if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
return err
}
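This hunk, and several like it below, drop the `&& !os.IsExist(err)` guard: os.MkdirAll (and the idtools wrappers built on it) already returns nil when the full path exists as a directory, so the guard only swallowed real errors such as a file sitting where a directory is expected. A quick demonstration of the stdlib behavior:

package main

import (
	"fmt"
	"os"
)

func main() {
	_ = os.MkdirAll("/tmp/demo/a/b", 0700)
	err := os.MkdirAll("/tmp/demo/a/b", 0700) // path is already a directory
	fmt.Println(err)                          // <nil>, never an "exists" error
}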
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index ca50e7f06..0afa6c84d 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -183,7 +183,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
d.ctr.Decrement(mp)
return "", err
}
@@ -198,7 +198,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return "", err
}
- if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index a566e4afd..cbfad2cd5 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -145,7 +145,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- return false, errors.Wrap(err, "failed to mount overlay for metacopy check")
+ return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 7e7dba753..2906e3e08 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -33,6 +33,7 @@ import (
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
@@ -152,11 +153,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
return nil, err
}
runhome := filepath.Join(options.RunRoot, filepath.Base(home))
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
return nil, err
}
@@ -555,7 +556,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
// Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
@@ -767,7 +768,7 @@ func (d *Driver) recreateSymlinks() error {
if err != nil {
return err
}
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
return err
}
for _, dir := range dirs {
@@ -809,6 +810,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
readWrite := true
+ for _, o := range options.Options {
+ if o == "ro" {
+ readWrite = false
+ break
+ }
+ }
+
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
@@ -886,7 +894,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// if we are doing a readOnly mount, and there is only one lower
// We should just return the lower directory, no reason to mount.
- if !readWrite {
+ if !readWrite && d.options.mountProgram == "" {
if len(absLowers) == 0 {
return path.Join(dir, "empty"), nil
}
@@ -904,10 +912,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", err
}
diffDir := path.Join(dir, "diff")
- if readWrite {
- if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil && !os.IsExist(err) {
- return "", err
- }
+ if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil {
+ return "", err
}
mergedDir := path.Join(dir, "merged")
@@ -932,7 +938,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if readWrite {
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, path.Join(dir, "work"))
} else {
- opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
}
if len(options.Options) > 0 {
opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
@@ -1018,7 +1024,7 @@ func (d *Driver) Put(id string) error {
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mountpoint).Run()
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && errors.Cause(err) != exec.ErrNotFound {
logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
}
if err == nil {
@@ -1090,6 +1096,21 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
return whiteoutFormat
}
+type fileGetNilCloser struct {
+ storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access by tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p := d.getDiffPath(id)
+ return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
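fileGetNilCloser is the usual Go trick for widening an interface: embed the value that supplies most of the methods and add the one that is missing, here a no-op Close. Stripped of the storage types, the pattern looks like this (hypothetical stand-in interfaces):

type fileGetter interface {
	Get(path string) ([]byte, error)
}

type fileGetCloser interface {
	fileGetter
	Close() error
}

// nilCloser promotes Get from the embedded interface and adds Close.
type nilCloser struct {
	fileGetter
}

func (nilCloser) Close() error { return nil } // nothing to release

var _ fileGetCloser = nilCloser{} // compile-time check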
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index f2859b427..679d89112 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
)
var (
@@ -101,6 +102,21 @@ func (d *Driver) Cleanup() error {
return nil
}
+type fileGetNilCloser struct {
+ storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access by tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p := d.dir(id)
+ return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index a3ae07c9c..a7d9ade60 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -7,10 +7,10 @@ require (
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.0.0
github.com/klauspost/compress v1.10.5
- github.com/klauspost/pgzip v1.2.3
+ github.com/klauspost/pgzip v1.2.4
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
- github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/opencontainers/selinux v1.5.1
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index c430e925f..eab0fd61e 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -44,8 +44,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
-github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
+github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -59,8 +59,8 @@ github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index a188c510d..90f196371 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -63,8 +63,6 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
func (pm *PatternMatcher) Matches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
- parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for _, pattern := range pm.patterns {
negative := false
@@ -78,13 +76,6 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) {
return false, err
}
- if !match && parentPath != "." {
- // Check to see if the pattern matches one of our parent dirs.
- if len(pattern.dirs) <= len(parentPathDirs) {
- match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
- }
- }
-
if match {
matched = !negative
}
@@ -122,8 +113,6 @@ func (m *MatchResult) Excludes() uint {
// an error. This method is not safe to be called concurrently.
func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
file = filepath.FromSlash(file)
- parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
res = &MatchResult{false, 0, 0}
for _, pattern := range pm.patterns {
@@ -138,16 +127,6 @@ func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err erro
return nil, err
}
- if !match && parentPath != "." {
- // Check to see if the pattern matches one of our parent dirs.
- if len(pattern.dirs) <= len(parentPathDirs) {
- match, _ = pattern.match(strings.Join(
- parentPathDirs[:len(pattern.dirs)],
- string(os.PathSeparator)),
- )
- }
- }
-
if match {
res.isMatched = !negative
if negative {
@@ -265,8 +244,7 @@ func (p *Pattern) compile() error {
// in golang's filepath.Match
regStr += bs + string(ch)
} else if ch == '\\' {
- // escape next char. Note that a trailing \ in the pattern
- // will be left alone (but need to escape it)
+ // escape next char.
if sl == bs {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
@@ -277,14 +255,14 @@ func (p *Pattern) compile() error {
if scan.Peek() != scanner.EOF {
regStr += bs + string(scan.Next())
} else {
- regStr += bs
+ return filepath.ErrBadPattern
}
} else {
regStr += string(ch)
}
}
- regStr += "$"
+ regStr += "(/.*)?$"
re, err := regexp.Compile(regStr)
if err != nil {
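Two behavioral changes land in this fileutils hunk: a pattern ending in a bare backslash is now rejected with filepath.ErrBadPattern instead of leaving a dangling escape, and the compiled regexp gains a "(/.*)?$" suffix so a pattern matching a directory also matches everything beneath it, replacing the parent-directory walk deleted above. The suffix in isolation:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	old := regexp.MustCompile(`^foo$`)           // pre-change compilation of pattern "foo"
	updated := regexp.MustCompile(`^foo(/.*)?$`) // post-change compilation
	fmt.Println(old.MatchString("foo/bar"))     // false
	fmt.Println(updated.MatchString("foo/bar")) // true: contents of a matched directory
	fmt.Println(updated.MatchString("foobar"))  // false: the "/" is mandatory
}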
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 6b0f55030..9776b2a12 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"sync"
+ "syscall"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
@@ -26,13 +27,18 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
- if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ st, err := os.Stat(path)
+ if err != nil && os.IsNotExist(err) {
paths = []string{path}
- } else if err == nil && chownExisting {
- // short-circuit--we were called with an existing directory and chown was requested
- return SafeChown(path, ownerUID, ownerGID)
} else if err == nil {
- // nothing to do; directory path fully exists already and chown was NOT requested
+ if !st.IsDir() {
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+ if chownExisting {
+ // short-circuit--we were called with an existing directory and chown was requested
+ return SafeChown(path, ownerUID, ownerGID)
+ }
+ // nothing to do; directory exists and chown was NOT requested
return nil
}
@@ -49,7 +55,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = append(paths, dirPath)
}
}
- if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil {
return err
}
} else {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
index 9c8508397..16be94f44 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -9,7 +9,7 @@ import (
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
- if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 43b84d769..eaf622f43 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -629,14 +629,14 @@ func GetStore(options StoreOptions) (Store, error) {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
- if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
return nil, err
}
- if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
return nil, err
}
for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
- if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil {
return nil, err
}
}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 18e72efd1..34ff6a77a 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -113,7 +113,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
size = u.Uid
}
if u.Gid > size {
- size = u.Uid
+ size = u.Gid
}
}
}
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 000000000..e810e6fea
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
+
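EncodeVarint and DecodeVarint are thin forwarders to protowire. A quick round trip shows the wire format; 300 needs two bytes because each varint byte carries seven payload bits plus a continuation bit:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b) // ac 02
	v, n := proto.DecodeVarint(b)
	fmt.Println(v, n) // 300 2
}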
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
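The fixed-width encoders append little-endian bytes, least significant byte first, and never fail:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeFixed32(1) // appends 4 bytes
	_ = b.EncodeFixed64(1) // appends 8 more
	fmt.Printf("% x\n", b.Bytes()) // 01 00 00 00 01 00 00 00 00 00 00 00
}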
+// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends a length-prefixed string to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and the
+// total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
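
For reference, a minimal sketch (not part of the patch) of the Buffer round-trip API added above, assuming the rewritten package is importable as github.com/golang/protobuf/proto:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.SetDeterministic(true) // affects later Marshal calls: map entries sort by key

	// Zig-zag maps signed to unsigned: 0->0, -1->1, 1->2, -2->3, ...
	n := int64(-2)
	_ = b.EncodeVarint(300)             // plain unsigned varint
	_ = b.EncodeZigzag64(uint64(n))     // -2 encodes as varint 3 on the wire
	_ = b.EncodeRawBytes([]byte("abc")) // length-prefixed bytes

	v, _ := b.DecodeVarint()
	z, _ := b.DecodeZigzag64()       // sign-restored value, returned as uint64
	raw, _ := b.DecodeRawBytes(true) // alloc=true copies out of the internal buffer

	fmt.Println(v, int64(z), string(raw)) // 300 -2 abc
}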
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index 3cd3249f7..000000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "fmt"
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
- in := reflect.ValueOf(src)
- if in.IsNil() {
- return src
- }
- out := reflect.New(in.Type().Elem())
- dst := out.Interface().(Message)
- Merge(dst, src)
- return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
- // Merge merges src into this message.
- // Required and optional fields that are set in src will be set to that value in dst.
- // Elements of repeated fields will be appended.
- //
- // Merge may panic if called with a different argument type than the receiver.
- Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
- XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- if m, ok := dst.(Merger); ok {
- m.Merge(src)
- return
- }
-
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
- }
- if in.IsNil() {
- return // Merge from nil src is a noop
- }
- if m, ok := dst.(generatedMerger); ok {
- m.XXX_Merge(src)
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08be..000000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
- XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
- b := p.buf[p.index:]
- x, y := findEndGroup(b)
- if x < 0 {
- return io.ErrUnexpectedEOF
- }
- err := Unmarshal(b[:x], pb)
- p.index += y
- return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(newUnmarshaler); ok {
- err := u.XXX_Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- // Slow workaround for messages that aren't Unmarshalers.
- // This includes some hand-coded .pb.go files and
- // bootstrap protos.
- // TODO: fix all of those and then add Unmarshal to
- // the Message interface. Then:
- // The cast above and code below can be deleted.
- // The old unmarshaler can be deleted.
- // Clients can call Unmarshal directly (can already do that, actually).
- var info InternalMessageInfo
- err := info.Unmarshal(pb, p.buf[p.index:])
- p.index = len(p.buf)
- return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 000000000..d399bf069
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
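
For reference, a hedged usage sketch (not part of the patch) for SetDefaults; pb.Example and its import path are hypothetical stand-ins for any generated proto2 message that declares a field default:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/examplepb" // hypothetical generated package
)

func main() {
	// Assume pb.Example was generated from a proto2 message declaring:
	//   optional string name = 1 [default = "anon"];
	msg := &pb.Example{}
	proto.SetDefaults(msg)     // fills unpopulated scalars via protoreflect, as above
	fmt.Println(msg.GetName()) // "anon"
}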
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
index 35b882c09..e8db57e09 100644
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -1,63 +1,113 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-import "errors"
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
-// Deprecated: do not use.
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
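
For reference, a small sketch (not part of the patch) of the deprecated enum helpers defined above; they are self-contained, so this compiles against the package as patched, and "Kind" is just an arbitrary enum name used in error messages:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	names := map[string]int32{"FOO": 0, "BAR": 1}
	v1, _ := proto.UnmarshalJSONEnum(names, []byte(`"BAR"`), "Kind") // new style: string
	v2, _ := proto.UnmarshalJSONEnum(names, []byte(`1`), "Kind")     // old style: int
	fmt.Println(v1, v2) // 1 1

	// EnumName maps the other way, falling back to the number when unknown.
	values := map[int32]string{0: "FOO", 1: "BAR"}
	fmt.Println(proto.EnumName(values, 1), proto.EnumName(values, 7)) // BAR 7
}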
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
index dea2617ce..2187e877f 100644
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -1,48 +1,13 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
-type generatedDiscarder interface {
- XXX_DiscardUnknown()
-}
-
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
- if m, ok := m.(generatedDiscarder); ok {
- m.XXX_DiscardUnknown()
- return
- }
- // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
- // but the master branch has no implementation for InternalMessageInfo,
- // so it would be more work to replicate that approach.
- discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
- di := atomicLoadDiscardInfo(&a.discard)
- if di == nil {
- di = getDiscardInfo(reflect.TypeOf(m).Elem())
- atomicStoreDiscardInfo(&a.discard, di)
- }
- di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []discardFieldInfo
- unrecognized field
-}
-
-type discardFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
- discard func(src pointer)
-}
-
-var (
- discardInfoMap = map[reflect.Type]*discardInfo{}
- discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
- discardInfoLock.Lock()
- defer discardInfoLock.Unlock()
- di := discardInfoMap[t]
- if di == nil {
- di = &discardInfo{typ: t}
- discardInfoMap[t] = di
+ if m != nil {
+ discardUnknown(MessageReflect(m))
}
- return di
}
-func (di *discardInfo) discard(src pointer) {
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&di.initialized) == 0 {
- di.computeDiscardInfo()
- }
-
- for _, fi := range di.fields {
- sfp := src.offset(fi.field)
- fi.discard(sfp)
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
- // Ignore lock since DiscardUnknown is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- DiscardUnknown(m)
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
}
- }
- }
-
- if di.unrecognized.IsValid() {
- *src.offset(di.unrecognized).toBytes() = nil
- }
-}
-
-func (di *discardInfo) computeDiscardInfo() {
- di.lock.Lock()
- defer di.lock.Unlock()
- if di.initialized != 0 {
- return
- }
- t := di.typ
- n := t.NumField()
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- dfi := discardFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
- case isSlice: // E.g., []*pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sps := src.getPointerSlice()
- for _, sp := range sps {
- if !sp.isNil() {
- di.discard(sp)
- }
- }
- }
- default: // E.g., *pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- di.discard(sp)
- }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
}
}
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
- default: // E.g., map[K]V
- if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
- dfi.discard = func(src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- DiscardUnknown(val.Interface().(Message))
- }
- }
- } else {
- dfi.discard = func(pointer) {} // Noop
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
- default: // E.g., interface{}
- // TODO: Make this faster?
- dfi.discard = func(src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- DiscardUnknown(sv.Interface().(Message))
- }
- }
- }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
}
- default:
- continue
- }
- di.fields = append(di.fields, dfi)
- }
-
- di.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- di.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
- v := reflect.ValueOf(m)
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return
- }
- t := v.Type()
-
- for i := 0; i < v.NumField(); i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
}
- vf := v.Field(i)
- tf := f.Type
+ return true
+ })
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
- case isSlice: // E.g., []*pb.T
- for j := 0; j < vf.Len(); j++ {
- discardLegacy(vf.Index(j).Interface().(Message))
- }
- default: // E.g., *pb.T
- discardLegacy(vf.Interface().(Message))
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
- default: // E.g., map[K]V
- tv := vf.Type().Elem()
- if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
- for _, key := range vf.MapKeys() {
- val := vf.MapIndex(key)
- discardLegacy(val.Interface().(Message))
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
- default: // E.g., test_proto.isCommunique_Union interface
- if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
- vf = vf.Elem() // E.g., *test_proto.Communique_Msg
- if !vf.IsNil() {
- vf = vf.Elem() // E.g., test_proto.Communique_Msg
- vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
- if vf.Kind() == reflect.Ptr {
- discardLegacy(vf.Interface().(Message))
- }
- }
- }
- }
- }
- }
-
- if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
- if vf.Type() != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- vf.Set(reflect.ValueOf([]byte(nil)))
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(m); err == nil {
- // Ignore lock since discardLegacy is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- discardLegacy(m)
- }
- }
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
}
}
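
For reference, a hedged sketch (not part of the patch) of the scenario the DiscardUnknown doc comment describes; pb.Old and wireFromNewerSchema are hypothetical:

package main

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/project/examplepb" // hypothetical generated package
)

func main() {
	// Stand-in for bytes produced by a newer schema that carries fields
	// pb.Old does not know about.
	var wireFromNewerSchema []byte

	msg := new(pb.Old)
	_ = proto.Unmarshal(wireFromNewerSchema, msg) // unknown fields are retained
	proto.DiscardUnknown(msg)                     // recursively drops them, per above
	out, _ := proto.Marshal(msg)                  // re-encodes without them
	_ = out
}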
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index 3abfed2cf..000000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "reflect"
-)
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // ErrTooLarge is the error returned if Marshal is called with a
- // message that encodes to >2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- switch {
- case x < 1<<7:
- return 1
- case x < 1<<14:
- return 2
- case x < 1<<21:
- return 3
- case x < 1<<28:
- return 4
- case x < 1<<35:
- return 5
- case x < 1<<42:
- return 6
- case x < 1<<49:
- return 7
- case x < 1<<56:
- return 8
- case x < 1<<63:
- return 9
- }
- return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- siz := Size(pb)
- p.EncodeVarint(uint64(siz))
- return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index f9b6e41b3..000000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal. Note a "bytes" field,
- although represented by []byte, is not a repeated field and the
- rule for the scalar fields described above applies.
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Two map fields are equal iff their lengths are the same,
- and they contain the same set of elements. Zero-length map
- fields are equal.
- - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_InternalExtensions")
- if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- // Maps may have nil values in them, so check for nil.
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
- em1, _ := x1.extensionsRead()
- em2, _ := x2.extensionsRead()
- return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1 := extensionAsLegacyType(e1.value)
- m2 := extensionAsLegacyType(e2.value)
-
- if m1 == nil && m2 == nil {
- // Both have only encoded form.
- if bytes.Equal(e1.enc, e2.enc) {
- continue
- }
- // The bytes are different, but the extensions might still be
- // equal. We need to decode them to compare.
- }
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- // If both have only encoded form and the bytes are the same,
- // it is handled above. We get here when the bytes are different.
- // We don't know how to decode it, so just compare them as byte
- // slices.
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- return false
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
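For reference, a minimal sketch of the comparison semantics the deleted equal.go preserved, assuming a hypothetical generated package pb with the Test message from the lib.go package documentation below (fragment; imports github.com/golang/protobuf/proto and fmt):

	m1 := &pb.Test{Label: proto.String("hello")}
	m2 := &pb.Test{Label: proto.String("hello")}
	fmt.Println(proto.Equal(m1, m2))  // true: scalar fields are compared by value
	fmt.Println(m1.Label == m2.Label) // false: the pointers themselves differ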
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index fa88add30..42fc120c9 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -1,607 +1,356 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
import (
"errors"
"fmt"
- "io"
"reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- extensionsWrite() map[int32]Extension
- extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
- extendableProtoV1
-}
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
- return e.ExtensionMap()
-}
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- return e.ExtensionMap(), notLocker{}
-}
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
-func (n notLocker) Lock() {}
-func (n notLocker) Unlock() {}
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
- switch p := p.(type) {
- case extendableProto:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return p, nil
- case extendableProtoV1:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return extensionAdapter{p}, nil
- }
- // Don't allocate a specific error containing %T:
- // this is the hot path for Clone and MarshalText.
- return nil, errNotExtendable
-}
+// ErrMissingExtension is the error returned if the extension is not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-func isNilPtr(x interface{}) bool {
- v := reflect.ValueOf(x)
- return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
- // The struct must be indirect so that if a user inadvertently copies a
- // generated message and its embedded XXX_InternalExtensions, they
- // avoid the mayhem of a copied mutex.
- //
- // The mutex serializes all logically read-only operations to p.extensionMap.
- // It is up to the client to ensure that write operations to p.extensionMap are
- // mutually exclusive with other accesses.
- p *struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
}
-}
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
- if e.p == nil {
- e.p = new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
})
- e.p.extensionMap = make(map[int32]Extension)
}
- return e.p.extensionMap
-}
-// extensionsRead returns the extensions map for read-only use. It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
- if e.p == nil {
- return nil, nil
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
}
- return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
- Filename string // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
-
- // value is a concrete value for the extension field. Let the type of
- // desc.ExtensionType be the "API type" and the type of Extension.value
- // be the "storage type". The API type and storage type are the same except:
- // * For scalars (except []byte), the API type uses *T,
- // while the storage type uses T.
- // * For repeated fields, the API type uses []T, while the storage type
- // uses *[]T.
- //
- // The reason for the divergence is so that the storage type more naturally
- // matches what is expected when retrieving the values through the
- // protobuf reflection APIs.
- //
- // The value may only be populated if desc is also populated.
- value interface{}
-
- // enc is the raw bytes for the extension field.
- enc []byte
+ return has
}
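A minimal usage sketch for the rewritten HasExtension, assuming a hypothetical generated package pb with an extendable message pb.Base and a string extension descriptor pb.E_Name (fragment):

	m := &pb.Base{}
	// Reports true whether the extension is populated as a known field
	// or still encoded among the unknown fields with that field number.
	if proto.HasExtension(m, pb.E_Name) {
		// ...
	}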
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
- epb, err := extendable(base)
- if err != nil {
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- extmap := epb.extensionsWrite()
- extmap[id] = Extension{enc: b}
-}
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- var pbi interface{} = pb
- // Check the extended type.
- if ea, ok := pbi.(extensionAdapter); ok {
- pbi = ea.extendableProtoV1
- }
- if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
- // TODO: Check types, field numbers, etc.?
- epb, err := extendable(pb)
- if err != nil {
- return false
- }
- extmap, mu := epb.extensionsRead()
- if extmap == nil {
- return false
+ })
}
- mu.Lock()
- _, ok := extmap[extension.Field]
- mu.Unlock()
- return ok
+ clearUnknown(mr, fieldNum(xt.Field))
}
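Continuing the same hypothetical pb example: ClearExtension now clears both representations of the field, the populated known field and any matching unknown-field bytes:

	proto.ClearExtension(m, pb.E_Name)
	// The populated field is cleared via protoreflect, and clearUnknown
	// drops any raw unknown-field bytes carrying the same field number.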
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, err := extendable(pb)
- if err != nil {
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- // TODO: Check types, field numbers, etc.?
- extmap := epb.extensionsWrite()
- delete(extmap, extension.Field)
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
-// GetExtension retrieves a proto2 extended field from pb.
+// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
-
- if extension.ExtendedType != nil {
- // can only check type if this is a complete descriptor
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return defaultExtensionValue(extension)
- }
- mu.Lock()
- defer mu.Unlock()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return extensionAsLegacyType(e.value), nil
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
}
- if extension.ExtensionType == nil {
- // incomplete descriptor
- return e.enc, nil
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
}
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = extensionAsStorageType(v)
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return extensionAsLegacyType(e.value), nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- if extension.ExtensionType == nil {
- // incomplete descriptor, so no default
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
return nil, ErrMissingExtension
}
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
}
+ return v, nil
+}
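A hedged sketch of retrieving an extension through the rewritten GetExtension; the isScalarKind branch above means scalar extensions are still returned as *T, matching the legacy API (same hypothetical pb.E_Name of type *string):

	v, err := proto.GetExtension(m, pb.E_Name)
	switch {
	case err == proto.ErrMissingExtension:
		// not present and no default declared
	case err != nil:
		// m is not extendable, or the descriptor does not extend m
	default:
		name := *(v.(*string)) // scalar values come back wrapped in a pointer
		_ = name
	}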
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non int32 reflect.value directly
- // set it as a int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
}
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- unmarshal := typeUnmarshaler(t, extension.Tag)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate space to store the pointer/slice.
- value := reflect.New(t).Elem()
-
- var err error
- for {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- wire := int(x) & 7
-
- b, err = unmarshal(b, valToPointer(value.Addr()), wire)
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- break
- }
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// GetExtensions returns a list of the extension values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
}
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
if err != nil {
- return
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
}
+ vs[i] = v
}
- return
+ return vs, nil
}
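Sketch: GetExtensions swallows ErrMissingExtension per element, so absent extensions surface as nil entries rather than an error (pb.E_Name and pb.E_Age are assumed descriptors):

	vs, err := proto.GetExtensions(m, []*proto.ExtensionDesc{pb.E_Name, pb.E_Age})
	if err != nil {
		// only fatal errors (e.g. m is not extendable) reach here
	}
	// vs[i] is nil wherever the corresponding extension is absent.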
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
}
- registeredExtensions := RegisteredExtensions(pb)
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
}
-
- extensions = append(extensions, desc)
}
- return extensions, nil
-}
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, err := extendable(pb)
- if err != nil {
- return err
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
return nil
}
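A usage sketch; given the pointer normalization above, a scalar extension is set with the usual *T helper value (same hypothetical pb.E_Name of type *string):

	if err := proto.SetExtension(m, pb.E_Name, proto.String("gopher")); err != nil {
		// type mismatch, nil pointer value, or m is not extendable
	}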
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- epb, err := extendable(pb)
- if err != nil {
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
}
-}
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors both for populated extension fields in m and
+// for unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
}
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
}
- m[desc.Field] = desc
+ return xts, nil
}
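Sketch of walking the returned descriptors; entries that exist only as unknown fields come back type-incomplete, carrying nothing but the field number:

	xts, err := proto.ExtensionDescs(m)
	if err != nil {
		// m is not extendable
	}
	for _, xt := range xts {
		if xt.ExtensionType == nil {
			fmt.Println("unregistered extension, field number", xt.Field)
		}
	}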
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
-// extensionAsLegacyType converts a value in the storage type to the API type.
-// See Extension.value.
-func extensionAsLegacyType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- // Represent primitive types as a pointer to the value.
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Slice:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
+ return true
+ default:
+ return false
}
- return v
}
-// extensionAsStorageType converts a value in the API type to the storage type.
-// See Extension.value.
-func extensionAsStorageType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
- case reflect.Slice:
- // Represent slice types as a pointer to the value.
- if rv.Type().Elem().Kind() != reflect.Uint8 {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- return v
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
}
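fieldNum adapts a single field number to the same one-method interface that protoreflect's ExtensionRanges already satisfies, which is why clearUnknown can serve both call sites seen above:

	clearUnknown(mr, fieldNum(xt.Field))                // strip one field number
	clearUnknown(mr, mr.Descriptor().ExtensionRanges()) // strip every extension range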
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index fdd328bb7..000000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,965 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and an Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/golang/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
- if e.field == "" {
- return fmt.Sprintf("proto: required field not set")
- }
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
- if e.field == "" {
- return "proto: invalid UTF-8 detected"
- }
- return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
- return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or a InvalidUTF8 error.
-func isNonFatal(err error) bool {
- if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
- return true
- }
- if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
- return true
- }
- return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether it was successful.
-// It returns false for any fatal error.
-func (nf *nonFatal) Merge(err error) (ok bool) {
- if err == nil {
- return true // not an error
- }
- if !isNonFatal(err) {
- return false // fatal error
- }
- if nf.E == nil {
- nf.E = err // store first instance of non-fatal error
- }
- return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
- p.deterministic = deterministic
-}
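The removed Buffer API was used roughly as follows for deterministic output (a sketch against the deleted v1 API; msg is any generated Message, and log is the standard library logger):

	var b proto.Buffer
	b.SetDeterministic(true)
	if err := b.Marshal(msg); err != nil {
		log.Fatal(err)
	}
	data := b.Bytes() // map entries are serialized in sorted key order
	_ = data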
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- index := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
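Using the Test message from the package documentation above (optional int32 type = 2 [default=77]), the removed SetDefaults behaved roughly like this:

	m := &pb.Test{}
	proto.SetDefaults(m)
	fmt.Println(*m.Type) // 77: an unset field with a declared default is filled in
	// Fields that are already set, or have no declared default, are left alone.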
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- v = v.Elem()
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or []*T or map[T]*T
- switch f.Kind() {
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{vs: vs}
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- case reflect.Bool:
- s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
- case reflect.String:
- s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
- default:
- panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-const (
-	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
-	// to assert that the generated code is compatible with this version of the proto package.
-	ProtoPackageIsVersion3 = true
-
-	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-	// to assert that the generated code is compatible with this version of the proto package.
-	ProtoPackageIsVersion2 = true
-
-	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-	// to assert that the generated code is compatible with this version of the proto package.
-	ProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
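The deleted mapKeys/mapKeySorter helpers exist because Go randomizes map
iteration order, while text and wire output need a deterministic key order. A
minimal standalone sketch of the same idea, using reflect with sort.Slice
rather than the vendored sorter:

    package main

    import (
    	"fmt"
    	"reflect"
    	"sort"
    )

    func main() {
    	m := map[int32]string{3: "c", 1: "a", 2: "b"}
    	v := reflect.ValueOf(m)
    	keys := v.MapKeys()
    	// int32 keys compare via Int(); other key kinds need their own less func.
    	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })
    	for _, k := range keys {
    		fmt.Println(k.Int(), v.MapIndex(k).String()) // 1 a / 2 b / 3 c
    	}
    }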
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index f48a75676..000000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
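The deleted unmarshalMessageSet re-frames each item as an ordinary
length-delimited field: a tag varint of id<<3|WireBytes, then a length varint,
then the payload. A rough standalone sketch of that framing, using the
protowire package (an assumption here; this file did not import it):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
    	const id = 12345
    	payload := []byte("message bytes")
    	b := protowire.AppendTag(nil, protowire.Number(id), protowire.BytesType) // id<<3 | 2
    	b = protowire.AppendBytes(b, payload)                                    // length varint + payload
    	fmt.Printf("% x\n", b)
    }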
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index 94fa9194a..000000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
- "sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
- v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
- v := reflect.ValueOf(*i)
- u := reflect.New(v.Type())
- u.Elem().Set(v)
- if deref {
- u = u.Elem()
- }
- return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
- return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
- n, m := s.Len(), s.Cap()
- if n < m {
- s.SetLen(n + 1)
- } else {
- s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
- }
- return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
- return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
- return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return p.v.Interface().(**int32)
-}
- func (p pointer) toInt32Slice() *[]int32 {
- return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().(*int32)
- }
- // an enum
- return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
- // Allocate value in a *int32. Possibly convert that to a *enum.
- // Then assign it to a **int32 or **enum.
- // Note: we can convert *int32 to *enum, but we can't convert
- // **int32 to **enum!
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().([]int32)
- }
- // an enum
- // Allocate a []int32, then assign []enum's values into it.
- // Note: we can't convert []enum to []int32.
- slice := p.v.Elem()
- s := make([]int32, slice.Len())
- for i := 0; i < slice.Len(); i++ {
- s[i] = int32(slice.Index(i).Int())
- }
- return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- p.v.Elem().Set(reflect.ValueOf(v))
- return
- }
- // an enum
- // Allocate a []enum, then assign []int32's values into it.
- // Note: we can't convert []enum to []int32.
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
- for i, x := range v {
- slice.Index(i).SetInt(int64(x))
- }
- p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
- grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
- return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
- return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
- return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
- return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
- return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
- return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
- return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
- return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
- return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
- return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
- return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
- p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
- grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
- if v == nil {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
- return
- }
- s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
- for _, p := range v {
- s = reflect.Append(s, p.v)
- }
- p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- if p.v.Elem().IsNil() {
- return pointer{v: p.v.Elem()}
- }
- return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- // TODO: check that p.v.Type().Elem() == t?
- return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-
-var atomicLock sync.Mutex
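The deleted purego implementation identifies a field by its reflect index path
and resolves it with FieldByIndex. A self-contained sketch of that mechanism
(illustrative only, not the vendored types):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type msg struct{ Count int64 }

    func main() {
    	m := &msg{}
    	sf, _ := reflect.TypeOf(m).Elem().FieldByName("Count")
    	// sf.Index plays the role of the field type; offset() resolved it.
    	fp := reflect.ValueOf(m).Elem().FieldByIndex(sf.Index).Addr()
    	*fp.Interface().(*int64) = 42 // the moral equivalent of toInt64()
    	fmt.Println(m.Count)          // 42
    }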
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index dbfffe071..000000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "sync/atomic"
- "unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
- p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- // Super-tricky - read pointer out of data word of interface value.
- // Saves ~25ns over the equivalent:
- // return valToPointer(reflect.ValueOf(*i))
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
- // Super-tricky - read or get the address of data word of interface value.
- if isptr {
- // The interface is of pointer type, thus it is a direct interface.
- // The data word is the pointer data itself. We take its address.
- p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
- } else {
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
- }
- if deref {
- p.p = *(*unsafe.Pointer)(p.p)
- }
- return p
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- // For safety, we should panic if !f.IsValid, however calling panic causes
- // this to no longer be inlineable, which is a serious performance cost.
- /*
- if !f.IsValid() {
- panic("invalid field")
- }
- */
- return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
- return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
- return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
- return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return (**int32)(p.p)
- }
- func (p pointer) toInt32Slice() *[]int32 {
- return (*[]int32)(p.p)
- }
-*/
-func (p pointer) getInt32Ptr() *int32 {
- return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
- *(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
- return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
- *(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
- s := (*[]int32)(p.p)
- *s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
- return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
- return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
- return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
- return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
- return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
- return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
- return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
- return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
- return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
- return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We load it as []pointer.
- return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We store it as []pointer.
- *(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
- return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
- *(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
- s := (*[]unsafe.Pointer)(p.p)
- *s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- // Super-tricky - read pointer out of data word of interface value.
- return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
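The deleted unsafe variant replaces the index path with a byte offset and plain
pointer arithmetic. A minimal sketch of the same trick (Go 1.17+ for
unsafe.Add; illustrative only, not the vendored API):

    package main

    import (
    	"fmt"
    	"reflect"
    	"unsafe"
    )

    type msg struct {
    	A int32
    	B int64
    }

    func main() {
    	m := &msg{}
    	sf, _ := reflect.TypeOf(m).Elem().FieldByName("B")
    	// field(sf.Offset) plus pointer.offset() boil down to this addition.
    	p := unsafe.Add(unsafe.Pointer(m), sf.Offset)
    	*(*int64)(p) = 42
    	fmt.Println(m.B) // 42
    }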
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index a4b8c0cd3..dcdc2202f 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -1,162 +1,104 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
import (
"fmt"
- "log"
"reflect"
- "sort"
"strconv"
"strings"
"sync"
-)
-
-const debug bool = false
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
)
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
+ // It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
WireType int
- Tag int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
Required bool
+	// Optional reports whether this is an optional field.
Optional bool
+ // Repeated reports whether this is a repeated field.
Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
-
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
- mtype reflect.Type // set for map types only
- MapKeyProp *Properties // set for map types only
- MapValProp *Properties // set for map types only
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
+ s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
@@ -170,18 +112,21 @@ func (p *Properties) String() string {
s += ",packed"
}
s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
+ if p.JSONName != "" {
s += ",json=" + p.JSONName
}
- if p.proto3 {
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
s += ",proto3"
}
- if p.oneof {
+ if p.Oneof {
s += ",oneof"
}
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
if p.HasDefault {
s += ",def=" + p.Default
}
@@ -189,356 +134,173 @@ func (p *Properties) String() string {
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- log.Printf("proto: tag has too few fields: %q", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- case "fixed32":
- p.WireType = WireFixed32
- case "fixed64":
- p.WireType = WireFixed64
- case "zigzag32":
- p.WireType = WireVarint
- case "zigzag64":
- p.WireType = WireVarint
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- log.Printf("proto: tag has unknown wire type: %q", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
-outer:
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
p.Optional = true
- case f == "rep":
+ case s == "req":
+ p.Required = true
+ case s == "rep":
p.Repeated = true
- case f == "packed":
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break outer
- }
- }
- }
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- switch t1 := typ; t1.Kind() {
- case reflect.Ptr:
- if t1.Elem().Kind() == reflect.Struct {
- p.stype = t1.Elem()
- }
-
- case reflect.Slice:
- if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
- p.stype = t2.Elem()
- }
-
- case reflect.Map:
- p.mtype = t1
- p.MapKeyProp = &Properties{}
- p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.MapValProp = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
- p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
-
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
+ p.Default, i = tag[len("def="):], len(tag)
}
+ tag = strings.TrimPrefix(tag[i:], ",")
}
}
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
- p.setFieldProps(typ, f, lockGetProp)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
}
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- return sprop
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
}
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
}
-type (
- oneofFuncsIface interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- oneofWrappersIface interface {
- XXX_OneofWrappers() []interface{}
- }
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- return prop
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
+ var hasOneof bool
prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
+ // Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
}
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
+
+ prop.Prop = append(prop.Prop, p)
}
- // Re-order prop.order.
- sort.Sort(prop)
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
- var oots []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oots = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oots = m.XXX_OneofWrappers()
- }
- if len(oots) > 0 {
- // Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
}
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
}
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
}
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
}
- prop.reqCount = reqCount
return prop
}
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
- protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypedNils[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
- // Generated code always calls RegisterType with nil x.
- // This check is just for extra safety.
- protoTypedNils[name] = x
- } else {
- protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
- }
- revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
- if reflect.TypeOf(x).Kind() != reflect.Map {
- panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
- }
- if _, ok := protoMapTypes[name]; ok {
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoMapTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
- if t, ok := protoTypedNils[name]; ok {
- return reflect.TypeOf(t)
- }
- return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
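Since Parse and String remain exported, the tag grammar handled above
("wire,tag,opt|req|rep,name=...,def=...") can be exercised directly. A small
driver, assuming the package is importable under its usual path:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    )

    func main() {
    	p := new(proto.Properties)
    	p.Parse("bytes,49,opt,name=foo,def=hello!")
    	fmt.Println(p.Tag, p.OrigName, p.HasDefault, p.Default) // 49 foo true hello!
    	fmt.Println(p.String()) // round-trips: bytes,49,opt,name=foo,def=hello!
    }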
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 000000000..5aee89c32
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programmatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+ // The provided buffer is only valid during the method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
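+
+// As a minimal sketch of these semantics (pb.List is a hypothetical
+// generated type with a repeated string field Items):
+//
+//	dst := &pb.List{Items: []string{"a"}}
+//	src := &pb.List{Items: []string{"b"}}
+//	proto.Merge(dst, src) // dst.Items is now []string{"a", "b"}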
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values, which are compared using bytes.Equal, and
+// floating-point values, which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
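+
+// For instance (a sketch; pb.Point is a hypothetical proto3 message with a
+// double field X), two messages holding NaN compare as equal here, even
+// though NaN != NaN under Go's == operator:
+//
+//	a := &pb.Point{X: math.NaN()}
+//	b := &pb.Point{X: math.NaN()}
+//	_ = proto.Equal(a, b) // true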
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 000000000..1e7ff6420
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,323 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
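+
+// A sketch of how older generated code invokes this (the file path,
+// variable name, and descriptor bytes below are placeholders, not a valid
+// gzipped FileDescriptorProto):
+//
+//	var fileDescriptorFoo = []byte{ /* gzipped FileDescriptorProto */ }
+//
+//	func init() {
+//		proto.RegisterFile("foo.proto", fileDescriptorFoo)
+//	}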
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
+ b = fd.ProtoLegacyRawDesc()
+ } else {
+ // TODO: Use protodesc.ToFileDescriptorProto to construct
+ // a descriptorpb.FileDescriptorProto and marshal it.
+ // However, doing so causes the proto package to have a dependency
+ // on descriptorpb, leading to cyclic dependency issues.
+ }
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package that the
+// enum is declared within followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
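+
+// A sketch of typical usage (the enum name below is hypothetical):
+//
+//	if m := proto.EnumValueMap("my.proto.package.MyMessage_MyEnum"); m != nil {
+//		num, ok := m["MY_VALUE"]
+//		_, _ = num, ok // numeric value for the named enum constant, if present
+//	}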
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of a protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
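+
+// A sketch of typical usage, assuming the well-known Duration type has been
+// registered by importing its generated package:
+//
+//	if t := proto.MessageType("google.protobuf.Duration"); t != nil {
+//		m := reflect.New(t.Elem()).Interface().(proto.Message)
+//		_ = m // a freshly allocated Duration message
+//	}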
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
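+
+// A sketch of typical usage (pb.Base is a hypothetical generated message
+// type that declares extension ranges):
+//
+//	for num, desc := range proto.RegisteredExtensions((*pb.Base)(nil)) {
+//		fmt.Printf("extension field %d: %s\n", num, desc.Name)
+//	}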
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
deleted file mode 100644
index 5cb11fa95..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,2776 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// A sizer takes a pointer to a field and the size of its tag, and computes
-// the size of the encoded data.
-type sizer func(pointer, int) int
-
-// A marshaler takes a byte slice, a pointer to a field, and its tag (in wire format);
-// it marshals the field to the end of the slice and returns the slice and an error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
- typ reflect.Type
- fields []*marshalFieldInfo
- unrecognized field // offset of XXX_unrecognized
- extensions field // offset of XXX_InternalExtensions
- v1extensions field // offset of XXX_extensions
- sizecache field // offset of XXX_sizecache
- initialized int32 // 0 -- only typ is set, 1 -- fully initialized
- messageset bool // uses message set wire format
- hasmarshaler bool // has custom marshaler
- sync.RWMutex // protect extElems map, also for initialization
- extElems map[int32]*marshalElemInfo // info of extension elements
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
- field field
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isPointer bool
- required bool // field is required
- name string // name of the field, for error reporting
- oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
- deref bool // dereference the pointer before operating on it; implies isptr
-}
-
-var (
- marshalInfoMap = map[reflect.Type]*marshalInfo{}
- marshalInfoLock sync.Mutex
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns is not necessarily initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
- marshalInfoLock.Lock()
- u, ok := marshalInfoMap[t]
- if !ok {
- u = &marshalInfo{typ: t}
- marshalInfoMap[t] = u
- }
- marshalInfoLock.Unlock()
- return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want to crash in this case.
- return 0
- }
- return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want to crash in this case.
- return b, ErrNil
- }
- return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
- // u := a.marshal, but atomically.
- // We use an atomic here to ensure memory consistency.
- u := atomicLoadMarshalInfo(&a.marshal)
- if u == nil {
- // Get marshal information from type of message.
- t := reflect.ValueOf(msg).Type()
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
- }
- u = getMarshalInfo(t.Elem())
- // Store it in the cache for later users.
- // a.marshal = u, but atomically.
- atomicStoreMarshalInfo(&a.marshal, u)
- }
- return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b, _ := m.Marshal()
- return len(b)
- }
-
- n := 0
- for _, f := range u.fields {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- n += f.sizer(ptr.offset(f.field), f.tagsize)
- }
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- n += u.sizeMessageSet(e)
- } else {
- n += u.sizeExtensions(e)
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- n += u.sizeV1Extensions(m)
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- n += len(s)
- }
- // cache the result for use in marshal
- if u.sizecache.IsValid() {
- atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
- }
- return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
- if u.sizecache.IsValid() {
- return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
- }
- return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, and returns the slice and an error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b1, err := m.Marshal()
- b = append(b, b1...)
- return b, err
- }
-
- var err, errLater error
- // The old marshaler encodes extensions at the beginning.
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- b, err = u.appendMessageSet(b, e, deterministic)
- } else {
- b, err = u.appendExtensions(b, e, deterministic)
- }
- if err != nil {
- return b, err
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- b, err = u.appendV1Extensions(b, m, deterministic)
- if err != nil {
- return b, err
- }
- }
- for _, f := range u.fields {
- if f.required {
- if ptr.offset(f.field).getPointer().isNil() {
- // Required field is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name}
- }
- continue
- }
- }
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
- if err != nil {
- if err1, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name + "." + err1.field}
- }
- continue
- }
- if err == errRepeatedHasNil {
- err = errors.New("proto: repeated field " + f.name + " has nil element")
- }
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return b, err
- }
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- b = append(b, s...)
- }
- return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
- u.Lock()
- defer u.Unlock()
- if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
- return
- }
-
- t := u.typ
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.v1extensions = invalidField
- u.sizecache = invalidField
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if reflect.PtrTo(t).Implements(marshalerType) {
- u.hasmarshaler = true
- atomic.StoreInt32(&u.initialized, 1)
- return
- }
-
- // get oneof implementers
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
-
- n := t.NumField()
-
- // deal with XXX fields first
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if !strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- switch f.Name {
- case "XXX_sizecache":
- u.sizecache = toField(&f)
- case "XXX_unrecognized":
- u.unrecognized = toField(&f)
- case "XXX_InternalExtensions":
- u.extensions = toField(&f)
- u.messageset = f.Tag.Get("protobuf_messageset") == "1"
- case "XXX_extensions":
- u.v1extensions = toField(&f)
- case "XXX_NoUnkeyedLiteral":
- // nothing to do
- default:
- panic("unknown XXX field: " + f.Name)
- }
- n--
- }
-
- // normal fields
- fields := make([]marshalFieldInfo, n) // batch allocation
- u.fields = make([]*marshalFieldInfo, 0, n)
- for i, j := 0, 0; i < t.NumField(); i++ {
- f := t.Field(i)
-
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- field := &fields[j]
- j++
- field.name = f.Name
- u.fields = append(u.fields, field)
- if f.Tag.Get("protobuf_oneof") != "" {
- field.computeOneofFieldInfo(&f, oneofImplementers)
- continue
- }
- if f.Tag.Get("protobuf") == "" {
- // field has no tag (not in generated message), ignore it
- u.fields = u.fields[:len(u.fields)-1]
- j--
- continue
- }
- field.computeMarshalFieldInfo(&f)
- }
-
- // fields are marshaled in tag order on the wire.
- sort.Sort(byTag(u.fields))
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int { return len(a) }
-func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
- // get from cache first
- u.RLock()
- e, ok := u.extElems[desc.Field]
- u.RUnlock()
- if ok {
- return e
- }
-
- t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
- tags := strings.Split(desc.Tag, ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
- t = t.Elem()
- }
- sizer, marshaler := typeMarshaler(t, tags, false, false)
- var deref bool
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- t = reflect.PtrTo(t)
- deref = true
- }
- e = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- isptr: t.Kind() == reflect.Ptr,
- deref: deref,
- }
-
- // update cache
- u.Lock()
- if u.extElems == nil {
- u.extElems = make(map[int32]*marshalElemInfo)
- }
- u.extElems[desc.Field] = e
- u.Unlock()
- return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
- // Parse the protobuf tag of the field.
- // The tag has the format "bytes,49,opt,name=foo,def=hello!".
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- if tags[0] == "" {
- return
- }
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if tags[2] == "req" {
- fi.required = true
- }
- fi.setTag(f, tag, wt)
- fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
- fi.field = toField(f)
- fi.wiretag = math.MaxInt32 // Use a large tag number so that oneofs sort at the end. This tag will not appear on the wire.
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
- fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
- ityp := f.Type // interface type
- for _, o := range oneofImplementers {
- t := reflect.TypeOf(o)
- if !t.Implements(ityp) {
- continue
- }
- sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
- tags := strings.Split(sf.Tag.Get("protobuf"), ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
- fi.oneofElems[t.Elem()] = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- }
- }
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
- switch encoding {
- case "fixed32":
- return WireFixed32
- case "fixed64":
- return WireFixed64
- case "varint", "zigzag32", "zigzag64":
- return WireVarint
- case "bytes":
- return WireBytes
- case "group":
- return WireStartGroup
- }
- panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
- fi.field = toField(f)
- fi.wiretag = uint64(tag)<<3 | wt
- fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
- switch f.Type.Kind() {
- case reflect.Map:
- // map field
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeMapMarshaler(f)
- return
- case reflect.Ptr, reflect.Slice:
- fi.isPointer = true
- }
- fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
- encoding := tags[0]
-
- pointer := false
- slice := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- packed := false
- proto3 := false
- validateUTF8 := true
- for i := 2; i < len(tags); i++ {
- if tags[i] == "packed" {
- packed = true
- }
- if tags[i] == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return sizeBoolPtr, appendBoolPtr
- }
- if slice {
- if packed {
- return sizeBoolPackedSlice, appendBoolPackedSlice
- }
- return sizeBoolSlice, appendBoolSlice
- }
- if nozero {
- return sizeBoolValueNoZero, appendBoolValueNoZero
- }
- return sizeBoolValue, appendBoolValue
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixed32Ptr, appendFixed32Ptr
- }
- if slice {
- if packed {
- return sizeFixed32PackedSlice, appendFixed32PackedSlice
- }
- return sizeFixed32Slice, appendFixed32Slice
- }
- if nozero {
- return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
- }
- return sizeFixed32Value, appendFixed32Value
- case "varint":
- if pointer {
- return sizeVarint32Ptr, appendVarint32Ptr
- }
- if slice {
- if packed {
- return sizeVarint32PackedSlice, appendVarint32PackedSlice
- }
- return sizeVarint32Slice, appendVarint32Slice
- }
- if nozero {
- return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
- }
- return sizeVarint32Value, appendVarint32Value
- }
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixedS32Ptr, appendFixedS32Ptr
- }
- if slice {
- if packed {
- return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
- }
- return sizeFixedS32Slice, appendFixedS32Slice
- }
- if nozero {
- return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
- }
- return sizeFixedS32Value, appendFixedS32Value
- case "varint":
- if pointer {
- return sizeVarintS32Ptr, appendVarintS32Ptr
- }
- if slice {
- if packed {
- return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
- }
- return sizeVarintS32Slice, appendVarintS32Slice
- }
- if nozero {
- return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
- }
- return sizeVarintS32Value, appendVarintS32Value
- case "zigzag32":
- if pointer {
- return sizeZigzag32Ptr, appendZigzag32Ptr
- }
- if slice {
- if packed {
- return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
- }
- return sizeZigzag32Slice, appendZigzag32Slice
- }
- if nozero {
- return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
- }
- return sizeZigzag32Value, appendZigzag32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixed64Ptr, appendFixed64Ptr
- }
- if slice {
- if packed {
- return sizeFixed64PackedSlice, appendFixed64PackedSlice
- }
- return sizeFixed64Slice, appendFixed64Slice
- }
- if nozero {
- return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
- }
- return sizeFixed64Value, appendFixed64Value
- case "varint":
- if pointer {
- return sizeVarint64Ptr, appendVarint64Ptr
- }
- if slice {
- if packed {
- return sizeVarint64PackedSlice, appendVarint64PackedSlice
- }
- return sizeVarint64Slice, appendVarint64Slice
- }
- if nozero {
- return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
- }
- return sizeVarint64Value, appendVarint64Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixedS64Ptr, appendFixedS64Ptr
- }
- if slice {
- if packed {
- return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
- }
- return sizeFixedS64Slice, appendFixedS64Slice
- }
- if nozero {
- return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
- }
- return sizeFixedS64Value, appendFixedS64Value
- case "varint":
- if pointer {
- return sizeVarintS64Ptr, appendVarintS64Ptr
- }
- if slice {
- if packed {
- return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
- }
- return sizeVarintS64Slice, appendVarintS64Slice
- }
- if nozero {
- return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
- }
- return sizeVarintS64Value, appendVarintS64Value
- case "zigzag64":
- if pointer {
- return sizeZigzag64Ptr, appendZigzag64Ptr
- }
- if slice {
- if packed {
- return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
- }
- return sizeZigzag64Slice, appendZigzag64Slice
- }
- if nozero {
- return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
- }
- return sizeZigzag64Value, appendZigzag64Value
- }
- case reflect.Float32:
- if pointer {
- return sizeFloat32Ptr, appendFloat32Ptr
- }
- if slice {
- if packed {
- return sizeFloat32PackedSlice, appendFloat32PackedSlice
- }
- return sizeFloat32Slice, appendFloat32Slice
- }
- if nozero {
- return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
- }
- return sizeFloat32Value, appendFloat32Value
- case reflect.Float64:
- if pointer {
- return sizeFloat64Ptr, appendFloat64Ptr
- }
- if slice {
- if packed {
- return sizeFloat64PackedSlice, appendFloat64PackedSlice
- }
- return sizeFloat64Slice, appendFloat64Slice
- }
- if nozero {
- return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
- }
- return sizeFloat64Value, appendFloat64Value
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return sizeStringPtr, appendUTF8StringPtr
- }
- if slice {
- return sizeStringSlice, appendUTF8StringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendUTF8StringValueNoZero
- }
- return sizeStringValue, appendUTF8StringValue
- }
- if pointer {
- return sizeStringPtr, appendStringPtr
- }
- if slice {
- return sizeStringSlice, appendStringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendStringValueNoZero
- }
- return sizeStringValue, appendStringValue
- case reflect.Slice:
- if slice {
- return sizeBytesSlice, appendBytesSlice
- }
- if oneof {
- // A oneof bytes field may also have the "proto3" tag.
- // We want to marshal it as a oneof field, so do this
- // check before the proto3 check.
- return sizeBytesOneof, appendBytesOneof
- }
- if proto3 {
- return sizeBytes3, appendBytes3
- }
- return sizeBytes, appendBytes
- case reflect.Struct:
- switch encoding {
- case "group":
- if slice {
- return makeGroupSliceMarshaler(getMarshalInfo(t))
- }
- return makeGroupMarshaler(getMarshalInfo(t))
- case "bytes":
- if slice {
- return makeMessageSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageMarshaler(getMarshalInfo(t))
- }
- }
- panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(v) + tagsize
- }
- return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
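-// The bit expression above implements the zigzag transform (v<<1)^(v>>31),
-// which maps values of small magnitude to small varints, e.g.
-// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4.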
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
- }
- return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
- }
- return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
- return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toBool()
- if !v {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
- p := *ptr.toBoolPtr()
- if p == nil {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return 0
- }
- return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- if v == "" {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
- p := *ptr.toStringPtr()
- if p == nil {
- return 0
- }
- v := *p
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
- s := *ptr.toStringSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if v == nil {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBytesSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24))
- return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24),
- byte(v>>32),
- byte(v>>40),
- byte(v>>48),
- byte(v>>56))
- return b
-}
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
- // TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
- // have a non-leaf inliner.
- switch {
- case v < 1<<7:
- b = append(b, byte(v))
- case v < 1<<14:
- b = append(b,
- byte(v&0x7f|0x80),
- byte(v>>7))
- case v < 1<<21:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte(v>>14))
- case v < 1<<28:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte(v>>21))
- case v < 1<<35:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte(v>>28))
- case v < 1<<42:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte(v>>35))
- case v < 1<<49:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte(v>>42))
- case v < 1<<56:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte(v>>49))
- case v < 1<<63:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte(v>>56))
- default:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte((v>>56)&0x7f|0x80),
- 1)
- }
- return b
-}
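-
-// As a worked example of the continuation-bit scheme above: v = 300 falls in
-// the second case, so the low 7 bits (0x2C) are emitted with the continuation
-// bit set, giving 0xAC, followed by the remaining bits 300>>7 = 2, for a
-// final encoding of [0xAC, 0x02].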
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, *p)
- return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(*p))
- return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(*p))
- return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, *p)
- return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(*p))
- return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(*p))
- return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
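The varint packed slices need this sizing pre-pass because, unlike the fixed32/fixed64 cases, each element's encoded width depends on its value. SizeVarint only has to count 7-bit groups; a sketch of what it must compute (sizeVarint here is a hypothetical stand-in):

// sizeVarint reports how many bytes the base-128 varint encoding of v takes.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// sizeVarint(0) == 1, sizeVarint(127) == 1, sizeVarint(128) == 2,
// and a full 64-bit value takes ten bytes: sizeVarint(1<<63) == 10.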
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, *p)
- return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
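All of the zigzag variants apply the same interleaving transform, which maps 0, -1, 1, -2, 2, ... onto 0, 1, 2, 3, 4, ... so that small-magnitude negative numbers stay short on the wire. The 64-bit form and its inverse, as a self-contained sketch:

func zigzag64(v int64) uint64   { return uint64(v<<1) ^ uint64(v>>63) }
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

// zigzag64(0) == 0, zigzag64(-1) == 1, zigzag64(1) == 2, zigzag64(-2) == 3,
// and unzigzag64(zigzag64(v)) == v for every v.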
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- if !v {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = append(b, 1)
- return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toBoolPtr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- if *p {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(len(s)))
- for _, v := range s {
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toStringSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- s := *ptr.toStringSlice()
- for _, v := range s {
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
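Note the pattern shared by the four UTF8 string marshalers above: an invalid string is still written to the buffer in full, and errInvalidUTF8 is only returned afterwards, so callers that treat the error as non-fatal still get a complete encoding. The validation itself is the standard-library check:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	fmt.Println(utf8.ValidString("héllo"))              // true
	fmt.Println(utf8.ValidString(string([]byte{0xff}))) // false: a bare 0xff byte is not UTF-8
}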
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if v == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBytesSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- return u.size(p) + 2*tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- var err error
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, p, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- return b, err
- }
-}
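The end-group marker here is derived arithmetically: for a given field number, the start and end tags differ only in their wire-type bits, so wiretag+(WireEndGroup-WireStartGroup) turns a start tag into the matching end tag. A one-function sketch (groupEndTag is hypothetical):

// groupEndTag converts a start-group tag into its end-group counterpart.
func groupEndTag(startTag uint64) uint64 {
	const wireStartGroup, wireEndGroup = 3, 4
	return startTag + (wireEndGroup - wireStartGroup)
}

// For field 1: start tag = 1<<3|3 = 11, and groupEndTag(11) = 12 = 1<<3|4.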
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- n += u.size(v) + 2*tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, v, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.size(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(p)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, p, deterministic)
- }
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
- // figure out key and value type
- t := f.Type
- keyType := t.Key()
- valType := t.Elem()
- keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
- valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
- keyWireTag := 1<<3 | wiretype(keyTags[0])
- valWireTag := 2<<3 | wiretype(valTags[0])
-
- // We create an interface to get the addresses of the map key and value.
-	// If the value is pointer-typed, the interface is a direct interface:
-	// the idata itself is the value. Otherwise, the idata is a pointer to
-	// the value.
- // Key cannot be pointer-typed.
- valIsPtr := valType.Kind() == reflect.Ptr
-
- // If value is a message with nested maps, calling
- // valSizer in marshal may be quadratic. We should use
- // cached version in marshal (but not in size).
- // If value is not message type, we don't have size cache,
- // but it cannot be nested either. Just use valSizer.
- valCachedSizer := valSizer
- if valIsPtr && valType.Elem().Kind() == reflect.Struct {
- u := getMarshalInfo(valType.Elem())
- valCachedSizer = func(ptr pointer, tagsize int) int {
- // Same as message sizer, but use cache.
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.cachedsize(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- }
- }
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(t).Elem() // the map
- n := 0
- for _, k := range m.MapKeys() {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(t).Elem() // the map
- var err error
- keys := m.MapKeys()
- if len(keys) > 1 && deterministic {
- sort.Sort(mapKeys(keys))
- }
-
- var nerr nonFatal
- for _, k := range keys {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- b = appendVarint(b, uint64(siz))
- b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
- return b, err
- }
- }
- return b, nerr.E
- }
-}
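The keyWireTag and valWireTag constants above reflect how maps go on the wire: each entry is an implicit two-field message, with the key as field 1 and the value as field 2, wrapped in a length-delimited record for the map field itself. Hand-encoding one entry of a hypothetical map<string, int32> at field number 5 makes the layout concrete (appendVarint is the same base-128 helper sketched earlier):

package main

import "fmt"

func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	// Entry body: key "a" (field 1, wire 2), then value 7 (field 2, wire 0).
	entry := appendVarint(nil, 1<<3|2)
	entry = appendVarint(entry, 1)
	entry = append(entry, 'a')
	entry = appendVarint(entry, 2<<3|0)
	entry = appendVarint(entry, 7)

	// Map field 5, wire 2, then the entry as its length-delimited payload.
	b := appendVarint(nil, 5<<3|2)
	b = appendVarint(b, uint64(len(entry)))
	b = append(b, entry...)
	fmt.Printf("%% x\n", b) // 2a 05 0a 01 61 10 07
}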
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
- // Oneof field is an interface. We need to get the actual data type on the fly.
- t := f.Type
- return func(ptr pointer, _ int) int {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return 0
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- e := fi.oneofElems[telem]
- return e.sizer(p, e.tagsize)
- },
- func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return b, nil
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
- return b, errOneofHasNil
- }
- e := fi.oneofElems[telem]
- return e.marshaler(b, p, e.wiretag, deterministic)
- }
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- mu.Unlock()
- return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
-	// It is not clear this is required, but the old code did it.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// message set format is:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for id, e := range m {
- n += 2 // start group, end group. tag = 1 (size=1)
- n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- siz := len(msgWithLen)
- n += siz + 1 // message, tag = 3 (size=1)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, 1) // message, tag = 3 (size=1)
- }
- mu.Unlock()
- return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for id, e := range m {
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b = append(b, 1<<3|WireEndGroup)
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, id := range keys {
- e := m[int32(id)]
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- b = append(b, 1<<3|WireEndGroup)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
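Putting the message-set schema above into bytes: each item is a group that carries the type_id as field 2 and the pre-serialized message as field 3. A hedged sketch of the framing (frameMessageSetItem is hypothetical; appendVarint is the base-128 helper sketched earlier):

// frameMessageSetItem wraps an already-encoded message as one message-set item.
func frameMessageSetItem(b []byte, typeID uint64, msg []byte) []byte {
	b = append(b, 1<<3|3) // start group: Item, field 1 (WireStartGroup)
	b = append(b, 2<<3|0) // type_id, field 2 (WireVarint)
	b = appendVarint(b, typeID)
	b = append(b, 3<<3|2) // message, field 3 (WireBytes)
	b = appendVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	return append(b, 1<<3|4) // end group (WireEndGroup)
}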
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
- if m == nil {
- return 0
- }
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
- if m == nil {
- return b, nil
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- var err error
- var nerr nonFatal
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
- XXX_Size() int
- XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
- if m, ok := pb.(newMarshaler); ok {
- return m.XXX_Size()
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, _ := m.Marshal()
- return len(b)
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return 0
- }
- var info InternalMessageInfo
- return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- b := make([]byte, 0, siz)
- return m.XXX_Marshal(b, false)
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- return m.Marshal()
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return nil, ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- b := make([]byte, 0, siz)
- return info.Marshal(b, pb, false)
-}
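Ordinary callers reach all of the machinery in this file through Size and Marshal alone. A usage sketch, assuming a hypothetical generated type pb.Example with a Name field:

msg := &pb.Example{Name: "x"} // pb.Example is hypothetical
data, err := proto.Marshal(msg)
if err != nil {
	panic(err) // rare for well-formed messages
}
fmt.Println(len(data) == proto.Size(msg)) // true: Size reports the encoded length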
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
- var err error
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
- return err
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, err := m.Marshal()
- p.buf = append(p.buf, b...)
- return err
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
- return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
- need := len(p.buf) + n
- if need <= cap(p.buf) {
- return
- }
- newCap := len(p.buf) * 2
- if newCap < need {
- newCap = need
- }
- p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
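grow doubles the buffer (or jumps straight to the required size when doubling is not enough), so appending through a Buffer stays amortized O(1) per byte. The growth rule in isolation, as a standalone sketch:

// nextCap is a standalone copy of grow's capacity rule: double the current
// length, unless doubling still falls short of what is needed.
func nextCap(length, capacity, n int) int {
	need := length + n
	if need <= capacity {
		return capacity // already enough room
	}
	newCap := length * 2
	if newCap < need {
		newCap = need
	}
	return newCap
}

// nextCap(4, 4, 1) == 8; nextCap(4, 4, 100) == 104.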
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
deleted file mode 100644
index 5525def6a..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src are of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
- mi := atomicLoadMergeInfo(&a.merge)
- if mi == nil {
- mi = getMergeInfo(reflect.TypeOf(dst).Elem())
- atomicStoreMergeInfo(&a.merge, mi)
- }
- mi.merge(toPointer(&dst), toPointer(&src))
-}
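In practice this method is reached through the package-level Merge helper. A usage sketch, again assuming a hypothetical generated type pb.Example:

dst := &pb.Example{Name: "old"}
src := &pb.Example{Name: "new"}
proto.Merge(dst, src)
// dst.Name == "new": set (non-zero) scalar fields in src overwrite dst,
// while zero-valued src fields leave the corresponding dst fields alone.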
-
-type mergeInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []mergeFieldInfo
- unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
-
- // isPointer reports whether the value in the field is a pointer.
- // This is true for the following situations:
- // * Pointer to struct
- // * Pointer to basic type (proto2 only)
- // * Slice (first value in slice header is a pointer)
- // * String (first value in string header is a pointer)
- isPointer bool
-
- // basicWidth reports the width of the field assuming that it is directly
- // embedded in the struct (as is the case for basic types in proto3).
- // The possible values are:
- // 0: invalid
- // 1: bool
- // 4: int32, uint32, float32
- // 8: int64, uint64, float64
- basicWidth int
-
- // Where dst and src are pointers to the types being merged.
- merge func(dst, src pointer)
-}
-
-var (
- mergeInfoMap = map[reflect.Type]*mergeInfo{}
- mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
- mergeInfoLock.Lock()
- defer mergeInfoLock.Unlock()
- mi := mergeInfoMap[t]
- if mi == nil {
- mi = &mergeInfo{typ: t}
- mergeInfoMap[t] = mi
- }
- return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
- if dst.isNil() {
- panic("proto: nil destination")
- }
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&mi.initialized) == 0 {
- mi.computeMergeInfo()
- }
-
- for _, fi := range mi.fields {
- sfp := src.offset(fi.field)
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
- continue
- }
- if fi.basicWidth > 0 {
- switch {
- case fi.basicWidth == 1 && !*sfp.toBool():
- continue
- case fi.basicWidth == 4 && *sfp.toUint32() == 0:
- continue
- case fi.basicWidth == 8 && *sfp.toUint64() == 0:
- continue
- }
- }
- }
-
- dfp := dst.offset(fi.field)
- fi.merge(dfp, sfp)
- }
-
- // TODO: Make this faster?
- out := dst.asPointerTo(mi.typ).Elem()
- in := src.asPointerTo(mi.typ).Elem()
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- if mi.unrecognized.IsValid() {
- if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
- *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
- }
- }
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
- mi.lock.Lock()
- defer mi.lock.Unlock()
- if mi.initialized != 0 {
- return
- }
- t := mi.typ
- n := t.NumField()
-
- props := GetProperties(t)
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- mfi := mergeFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- switch tf.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.String:
- // As a special case, we assume slices and strings are pointers
-				// since we know that the first field in the SliceHeader or
- // StringHeader is a data pointer.
- mfi.isPointer = true
- case reflect.Bool:
- mfi.basicWidth = 1
- case reflect.Int32, reflect.Uint32, reflect.Float32:
- mfi.basicWidth = 4
- case reflect.Int64, reflect.Uint64, reflect.Float64:
- mfi.basicWidth = 8
- }
- }
-
- // Unwrap tf to get at its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + tf.Name())
- }
-
- switch tf.Kind() {
- case reflect.Int32:
- switch {
- case isSlice: // E.g., []int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
- /*
- sfsp := src.toInt32Slice()
- if *sfsp != nil {
- dfsp := dst.toInt32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- */
- sfs := src.getInt32Slice()
- if sfs != nil {
- dfs := dst.getInt32Slice()
- dfs = append(dfs, sfs...)
- if dfs == nil {
- dfs = []int32{}
- }
- dst.setInt32Slice(dfs)
- }
- }
- case isPointer: // E.g., *int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
- /*
- sfpp := src.toInt32Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt32Ptr()
- if *dfpp == nil {
- *dfpp = Int32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- */
- sfp := src.getInt32Ptr()
- if sfp != nil {
- dfp := dst.getInt32Ptr()
- if dfp == nil {
- dst.setInt32Ptr(*sfp)
- } else {
- *dfp = *sfp
- }
- }
- }
- default: // E.g., int32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt32(); v != 0 {
- *dst.toInt32() = v
- }
- }
- }
- case reflect.Int64:
- switch {
- case isSlice: // E.g., []int64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toInt64Slice()
- if *sfsp != nil {
- dfsp := dst.toInt64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- }
- case isPointer: // E.g., *int64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toInt64Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt64Ptr()
- if *dfpp == nil {
- *dfpp = Int64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., int64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt64(); v != 0 {
- *dst.toInt64() = v
- }
- }
- }
- case reflect.Uint32:
- switch {
- case isSlice: // E.g., []uint32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint32Slice()
- if *sfsp != nil {
- dfsp := dst.toUint32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint32{}
- }
- }
- }
- case isPointer: // E.g., *uint32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint32Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint32Ptr()
- if *dfpp == nil {
- *dfpp = Uint32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint32(); v != 0 {
- *dst.toUint32() = v
- }
- }
- }
- case reflect.Uint64:
- switch {
- case isSlice: // E.g., []uint64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint64Slice()
- if *sfsp != nil {
- dfsp := dst.toUint64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint64{}
- }
- }
- }
- case isPointer: // E.g., *uint64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint64Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint64Ptr()
- if *dfpp == nil {
- *dfpp = Uint64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint64(); v != 0 {
- *dst.toUint64() = v
- }
- }
- }
- case reflect.Float32:
- switch {
- case isSlice: // E.g., []float32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat32Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float32{}
- }
- }
- }
- case isPointer: // E.g., *float32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat32Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat32Ptr()
- if *dfpp == nil {
- *dfpp = Float32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat32(); v != 0 {
- *dst.toFloat32() = v
- }
- }
- }
- case reflect.Float64:
- switch {
- case isSlice: // E.g., []float64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat64Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float64{}
- }
- }
- }
- case isPointer: // E.g., *float64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat64Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat64Ptr()
- if *dfpp == nil {
- *dfpp = Float64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat64(); v != 0 {
- *dst.toFloat64() = v
- }
- }
- }
- case reflect.Bool:
- switch {
- case isSlice: // E.g., []bool
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toBoolSlice()
- if *sfsp != nil {
- dfsp := dst.toBoolSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []bool{}
- }
- }
- }
- case isPointer: // E.g., *bool
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toBoolPtr()
- if *sfpp != nil {
- dfpp := dst.toBoolPtr()
- if *dfpp == nil {
- *dfpp = Bool(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., bool
- mfi.merge = func(dst, src pointer) {
- if v := *src.toBool(); v {
- *dst.toBool() = v
- }
- }
- }
- case reflect.String:
- switch {
- case isSlice: // E.g., []string
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toStringSlice()
- if *sfsp != nil {
- dfsp := dst.toStringSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []string{}
- }
- }
- }
- case isPointer: // E.g., *string
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toStringPtr()
- if *sfpp != nil {
- dfpp := dst.toStringPtr()
- if *dfpp == nil {
- *dfpp = String(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., string
- mfi.merge = func(dst, src pointer) {
- if v := *src.toString(); v != "" {
- *dst.toString() = v
- }
- }
- }
- case reflect.Slice:
- isProto3 := props.Prop[i].proto3
- switch {
- case isPointer:
- panic("bad pointer in byte slice case in " + tf.Name())
- case tf.Elem().Kind() != reflect.Uint8:
- panic("bad element kind in byte slice case in " + tf.Name())
- case isSlice: // E.g., [][]byte
- mfi.merge = func(dst, src pointer) {
- sbsp := src.toBytesSlice()
- if *sbsp != nil {
- dbsp := dst.toBytesSlice()
- for _, sb := range *sbsp {
- if sb == nil {
- *dbsp = append(*dbsp, nil)
- } else {
- *dbsp = append(*dbsp, append([]byte{}, sb...))
- }
- }
- if *dbsp == nil {
- *dbsp = [][]byte{}
- }
- }
- }
- default: // E.g., []byte
- mfi.merge = func(dst, src pointer) {
- sbp := src.toBytes()
- if *sbp != nil {
- dbp := dst.toBytes()
- if !isProto3 || len(*sbp) > 0 {
- *dbp = append([]byte{}, *sbp...)
- }
- }
- }
- }
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("message field %s without pointer", tf))
- case isSlice: // E.g., []*pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sps := src.getPointerSlice()
- if sps != nil {
- dps := dst.getPointerSlice()
- for _, sp := range sps {
- var dp pointer
- if !sp.isNil() {
- dp = valToPointer(reflect.New(tf))
- mi.merge(dp, sp)
- }
- dps = append(dps, dp)
- }
- if dps == nil {
- dps = []pointer{}
- }
- dst.setPointerSlice(dps)
- }
- }
- default: // E.g., *pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- dp := dst.getPointer()
- if dp.isNil() {
- dp = valToPointer(reflect.New(tf))
- dst.setPointer(dp)
- }
- mi.merge(dp, sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in map case in " + tf.Name())
- default: // E.g., map[K]V
- mfi.merge = func(dst, src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- dm := dst.asPointerTo(tf).Elem()
- if dm.IsNil() {
- dm.Set(reflect.MakeMap(tf))
- }
-
- switch tf.Elem().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(Clone(val.Interface().(Message)))
- dm.SetMapIndex(key, val)
- }
-					case reflect.Slice: // Bytes type (e.g., []byte)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- dm.SetMapIndex(key, val)
- }
- default: // Basic type (e.g., string)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- dm.SetMapIndex(key, val)
- }
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in interface case in " + tf.Name())
- default: // E.g., interface{}
- // TODO: Make this faster?
- mfi.merge = func(dst, src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- du := dst.asPointerTo(tf).Elem()
- typ := su.Elem().Type()
- if du.IsNil() || du.Elem().Type() != typ {
- du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
- }
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- dv := du.Elem().Elem().Field(0)
- if dv.Kind() == reflect.Ptr && dv.IsNil() {
- dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- Merge(dv.Interface().(Message), sv.Interface().(Message))
-					case reflect.Slice: // Bytes type (e.g., []byte)
- dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
- default: // Basic type (e.g., string)
- dv.Set(sv)
- }
- }
- }
- }
- default:
- panic(fmt.Sprintf("merger not found for type:%s", tf))
- }
- mi.fields = append(mi.fields, mfi)
- }
-
- mi.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- mi.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&mi.initialized, 1)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index acee2fc52..000000000
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2053 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
- // Load the unmarshal information for this message type.
- // The atomic load ensures memory consistency.
- u := atomicLoadUnmarshalInfo(&a.unmarshal)
- if u == nil {
- // Slow path: find unmarshal info for msg, update a with it.
- u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
- atomicStoreUnmarshalInfo(&a.unmarshal, u)
- }
- // Then do the unmarshaling.
- err := u.unmarshal(toPointer(&msg), b)
- return err
-}
-
-type unmarshalInfo struct {
- typ reflect.Type // type of the protobuf struct
-
- // 0 = only typ field is initialized
- // 1 = completely initialized
- initialized int32
- lock sync.Mutex // prevents double initialization
- dense []unmarshalFieldInfo // fields indexed by tag #
- sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
- reqFields []string // names of required fields
- reqMask uint64 // 1<<len(reqFields)-1
- unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
- extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
- oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
- extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
- isMessageSet bool // if true, implies extensions field is valid
-}
-
-// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
-// It decodes the field, stores it at f, and returns the unused bytes.
-// w is the wire encoding.
-// b is the data after the tag and wire encoding have been read.
-type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
-
-type unmarshalFieldInfo struct {
- // location of the field in the proto message structure.
- field field
-
- // function to unmarshal the data for the field.
- unmarshal unmarshaler
-
- // if a required field, contains a single set bit at this field's index in the required field list.
- reqMask uint64
-
- name string // name of the field, for error reporting
-}
-
-var (
- unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
- unmarshalInfoLock sync.Mutex
-)
-
-// getUnmarshalInfo returns the data structure which can be
-// subsequently used to unmarshal a message of the given type.
-// t is the type of the message (note: not pointer to message).
-func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
- // It would be correct to return a new unmarshalInfo
- // unconditionally. We would end up allocating one
- // per occurrence of that type as a message or submessage.
- // We use a cache here just to reduce memory usage.
- unmarshalInfoLock.Lock()
- defer unmarshalInfoLock.Unlock()
- u := unmarshalInfoMap[t]
- if u == nil {
- u = &unmarshalInfo{typ: t}
- // Note: we just set the type here. The rest of the fields
- // will be initialized on first use.
- unmarshalInfoMap[t] = u
- }
- return u
-}
-
-// unmarshal does the main work of unmarshaling a message.
-// u provides type information used to unmarshal the message.
-// m is a pointer to a protocol buffer message.
-// b is a byte stream to unmarshal into m.
-// This is the top routine used when recursively unmarshaling submessages.
-func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeUnmarshalInfo()
- }
- if u.isMessageSet {
- return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
- }
- var reqMask uint64 // bitmask of required fields we've seen.
- var errLater error
- for len(b) > 0 {
- // Read tag and wire type.
- // Special case 1 and 2 byte varints.
- var x uint64
- if b[0] < 128 {
- x = uint64(b[0])
- b = b[1:]
- } else if len(b) >= 2 && b[1] < 128 {
- x = uint64(b[0]&0x7f) + uint64(b[1])<<7
- b = b[2:]
- } else {
- var n int
- x, n = decodeVarint(b)
- if n == 0 {
- return io.ErrUnexpectedEOF
- }
- b = b[n:]
- }
- tag := x >> 3
- wire := int(x) & 7
-
- // Dispatch on the tag to one of the unmarshal* functions below.
- var f unmarshalFieldInfo
- if tag < uint64(len(u.dense)) {
- f = u.dense[tag]
- } else {
- f = u.sparse[tag]
- }
- if fn := f.unmarshal; fn != nil {
- var err error
- b, err = fn(b, m.offset(f.field), wire)
- if err == nil {
- reqMask |= f.reqMask
- continue
- }
- if r, ok := err.(*RequiredNotSetError); ok {
- // Remember this error, but keep parsing. We need to produce
- // a full parse even if a required field is missing.
- if errLater == nil {
- errLater = r
- }
- reqMask |= f.reqMask
- continue
- }
- if err != errInternalBadWireType {
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return err
- }
- // Fragments with bad wire type are treated as unknown fields.
- }
-
- // Unknown tag.
- if !u.unrecognized.IsValid() {
- // Don't keep unrecognized data; just skip it.
- var err error
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- continue
- }
- // Keep unrecognized data around.
- // maybe in extensions, maybe in the unrecognized field.
- z := m.offset(u.unrecognized).toBytes()
- var emap map[int32]Extension
- var e Extension
- for _, r := range u.extensionRanges {
- if uint64(r.Start) <= tag && tag <= uint64(r.End) {
- if u.extensions.IsValid() {
- mp := m.offset(u.extensions).toExtensions()
- emap = mp.extensionsWrite()
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.oldExtensions.IsValid() {
- p := m.offset(u.oldExtensions).toOldExtensions()
- emap = *p
- if emap == nil {
- emap = map[int32]Extension{}
- *p = emap
- }
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- panic("no extensions field available")
- }
- }
-
- // Use wire type to skip data.
- var err error
- b0 := b
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- *z = encodeVarint(*z, tag<<3|uint64(wire))
- *z = append(*z, b0[:len(b0)-len(b)]...)
-
- if emap != nil {
- emap[int32(tag)] = e
- }
- }
- if reqMask != u.reqMask && errLater == nil {
- // A required field of this message is missing.
- for _, n := range u.reqFields {
- if reqMask&1 == 0 {
- errLater = &RequiredNotSetError{n}
- }
- reqMask >>= 1
- }
- }
- return errLater
-}
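-
-// requiredFieldCheckSketch is an editorial sketch (not part of the
-// original file) of the bitmask bookkeeping above: bit i of the mask
-// stands for the i'th entry of reqFields, and any bit still clear
-// after parsing names a missing required field.
-func requiredFieldCheckSketch(seen, want uint64, names []string) []string {
- var missing []string
- if seen != want {
- for _, n := range names {
- if seen&1 == 0 {
- missing = append(missing, n)
- }
- seen >>= 1
- }
- }
- return missing
-}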
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
- u.lock.Lock()
- defer u.lock.Unlock()
- if u.initialized != 0 {
- return
- }
- t := u.typ
- n := t.NumField()
-
- // Set up the "not found" value for the unrecognized byte buffer.
- // This is the default for proto3.
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.oldExtensions = invalidField
-
- // List of the generated type and offset for each oneof field.
- type oneofField struct {
- ityp reflect.Type // interface type of oneof field
- field field // offset in containing message
- }
- var oneofFields []oneofField
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.Name == "XXX_unrecognized" {
- // The byte slice used to hold unrecognized input is special.
- if f.Type != reflect.TypeOf(([]byte)(nil)) {
- panic("bad type for XXX_unrecognized field: " + f.Type.Name())
- }
- u.unrecognized = toField(&f)
- continue
- }
- if f.Name == "XXX_InternalExtensions" {
- // Ditto here.
- if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
- panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
- }
- u.extensions = toField(&f)
- if f.Tag.Get("protobuf_messageset") == "1" {
- u.isMessageSet = true
- }
- continue
- }
- if f.Name == "XXX_extensions" {
- // An older form of the extensions field.
- if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
- panic("bad type for XXX_extensions field: " + f.Type.Name())
- }
- u.oldExtensions = toField(&f)
- continue
- }
- if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
- continue
- }
-
- oneof := f.Tag.Get("protobuf_oneof")
- if oneof != "" {
- oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
- // The rest of oneof processing happens below.
- continue
- }
-
- tags := f.Tag.Get("protobuf")
- tagArray := strings.Split(tags, ",")
- if len(tagArray) < 2 {
- panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
- }
- tag, err := strconv.Atoi(tagArray[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tagArray[1])
- }
-
- name := ""
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- }
-
- // Extract unmarshaling function from the field (its type and tags).
- unmarshal := fieldUnmarshaler(&f)
-
- // Required field?
- var reqMask uint64
- if tagArray[2] == "req" {
- bit := len(u.reqFields)
- u.reqFields = append(u.reqFields, name)
- reqMask = uint64(1) << uint(bit)
- // TODO: if we have more than 64 required fields, we end up
- // not verifying that all required fields are present.
- // Fix this, perhaps using a count of required fields?
- }
-
- // Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask, name)
- }
-
- // Find any types associated with oneof fields.
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
- for _, v := range oneofImplementers {
- tptr := reflect.TypeOf(v) // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
- }
-
- }
-
- // Get extension ranges, if any.
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
- if fn.IsValid() {
- if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
- panic("a message with extensions, but no extensions field in " + t.Name())
- }
- u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
- }
-
- // Explicitly disallow tag 0. This will ensure we flag an error
- // when decoding a buffer of all zeros. Without this code, we
- // would decode and skip an all-zero buffer of even length.
- // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
- u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
- return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0, "")
-
- // Set mask for required field check.
- u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
- atomic.StoreInt32(&u.initialized, 1)
-}
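-
-// Editorial note (not part of the original file): initialization is
-// double-checked. unmarshal reads u.initialized atomically and only
-// calls computeUnmarshalInfo when it is still 0; the method then
-// re-checks under u.lock, so concurrent callers build the table once.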
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
- i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
- n := u.typ.NumField()
- if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
- for len(u.dense) <= tag {
- u.dense = append(u.dense, unmarshalFieldInfo{})
- }
- u.dense[tag] = i
- return
- }
- if u.sparse == nil {
- u.sparse = map[uint64]unmarshalFieldInfo{}
- }
- u.sparse[uint64(tag)] = i
-}
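-
-// lookupTagSketch is an editorial sketch (hypothetical helper, not in
-// the original file) of the two-level dispatch that setTag prepares
-// and unmarshal performs: small tags index the dense slice, all other
-// tags fall back to the sparse map.
-func (u *unmarshalInfo) lookupTagSketch(tag uint64) unmarshalFieldInfo {
- if tag < uint64(len(u.dense)) {
- return u.dense[tag]
- }
- return u.sparse[tag]
-}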
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
- if f.Type.Kind() == reflect.Map {
- return makeUnmarshalMap(f)
- }
- return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
- tagArray := strings.Split(tags, ",")
- encoding := tagArray[0]
- name := "unknown"
- proto3 := false
- validateUTF8 := true
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- if tag == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- // Figure out packaging (pointer, slice, or both)
- slice := false
- pointer := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + t.Name())
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return unmarshalBoolPtr
- }
- if slice {
- return unmarshalBoolSlice
- }
- return unmarshalBoolValue
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixedS32Ptr
- }
- if slice {
- return unmarshalFixedS32Slice
- }
- return unmarshalFixedS32Value
- case "varint":
- // this could be int32 or enum
- if pointer {
- return unmarshalInt32Ptr
- }
- if slice {
- return unmarshalInt32Slice
- }
- return unmarshalInt32Value
- case "zigzag32":
- if pointer {
- return unmarshalSint32Ptr
- }
- if slice {
- return unmarshalSint32Slice
- }
- return unmarshalSint32Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixedS64Ptr
- }
- if slice {
- return unmarshalFixedS64Slice
- }
- return unmarshalFixedS64Value
- case "varint":
- if pointer {
- return unmarshalInt64Ptr
- }
- if slice {
- return unmarshalInt64Slice
- }
- return unmarshalInt64Value
- case "zigzag64":
- if pointer {
- return unmarshalSint64Ptr
- }
- if slice {
- return unmarshalSint64Slice
- }
- return unmarshalSint64Value
- }
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixed32Ptr
- }
- if slice {
- return unmarshalFixed32Slice
- }
- return unmarshalFixed32Value
- case "varint":
- if pointer {
- return unmarshalUint32Ptr
- }
- if slice {
- return unmarshalUint32Slice
- }
- return unmarshalUint32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixed64Ptr
- }
- if slice {
- return unmarshalFixed64Slice
- }
- return unmarshalFixed64Value
- case "varint":
- if pointer {
- return unmarshalUint64Ptr
- }
- if slice {
- return unmarshalUint64Slice
- }
- return unmarshalUint64Value
- }
- case reflect.Float32:
- if pointer {
- return unmarshalFloat32Ptr
- }
- if slice {
- return unmarshalFloat32Slice
- }
- return unmarshalFloat32Value
- case reflect.Float64:
- if pointer {
- return unmarshalFloat64Ptr
- }
- if slice {
- return unmarshalFloat64Slice
- }
- return unmarshalFloat64Value
- case reflect.Map:
- panic("map type in typeUnmarshaler in " + t.Name())
- case reflect.Slice:
- if pointer {
- panic("bad pointer in slice case in " + t.Name())
- }
- if slice {
- return unmarshalBytesSlice
- }
- return unmarshalBytesValue
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return unmarshalUTF8StringPtr
- }
- if slice {
- return unmarshalUTF8StringSlice
- }
- return unmarshalUTF8StringValue
- }
- if pointer {
- return unmarshalStringPtr
- }
- if slice {
- return unmarshalStringSlice
- }
- return unmarshalStringValue
- case reflect.Struct:
- // message or group field
- if !pointer {
- panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
- }
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
- case "group":
- if slice {
- return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
- }
- }
- panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
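-
-// Editorial note (not part of the original file): the WireBytes branch
-// above handles packed encoding, where one length-prefixed field body
-// carries the whole repeated sequence. A packed repeated int64 holding
-// {3, 270} arrives as:
-//
-// []byte{0x03, 0x03, 0x8e, 0x02}
-// // len=3, then varints 3 and 270 (0x8e 0x02)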
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64() = v
- return b, nil
-}
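-
-// zigzagDecode64Sketch is an editorial sketch (not part of the
-// original file) of the expression above: the low bit of x carries the
-// sign, the arithmetic shift pair int64(x)<<63>>63 expands that bit to
-// 0 or -1, and the XOR with -1 is a bitwise negation. So the wire
-// values 0, 1, 2, 3, 4 decode to 0, -1, 1, -2, 2.
-func zigzagDecode64Sketch(x uint64) int64 {
- return int64(x>>1) ^ int64(x)<<63>>63
-}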
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64() = v
- return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32() = v
- return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64() = v
- return b[8:], nil
-}
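-
-// Editorial note (not part of the original file): the shift-and-OR
-// above is a hand-written little-endian load, equivalent to
-// binary.LittleEndian.Uint64(b[:8]) from encoding/binary.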
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64() = v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32() = v
- return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- *f.toInt32() = v
- return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.setInt32Ptr(v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- // Note: any length varint is allowed, even though any sane
- // encoder will use one byte.
- // See https://github.com/golang/protobuf/issues/76
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- // TODO: check if x>1? Tests seem to indicate no.
- v := x != 0
- *f.toBool() = v
- return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- *f.toBoolPtr() = &v
- return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- b = b[n:]
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64() = v
- return b[8:], nil
-}
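-
-// Editorial note (not part of the original file): a double on the wire
-// is its IEEE 754 bit pattern as a little-endian fixed64, which
-// math.Float64frombits reinterprets without any numeric conversion.
-// For example, the bytes 00 00 00 00 00 00 f0 3f assemble to
-// 0x3ff0000000000000, i.e. 1.0.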
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32() = v
- return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
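-
-// Editorial note (not part of the original file): the UTF8 variants
-// above still store the value and return the remaining bytes alongside
-// errInvalidUTF8; the unmarshal loop upgrades that sentinel to a
-// non-fatal invalidUTF8Error and keeps parsing.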
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // The use of append here is a trick which avoids the zeroing
- // that would be required if we used a make/copy pair.
- // We append to emptyBuf instead of nil because we want
- // a non-nil result even when the length is 0.
- v := append(emptyBuf[:], b[:x]...)
- *f.toBytes() = v
- return b[x:], nil
-}
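-
-// Editorial sketch (not part of the original file): the emptyBuf trick
-// keeps "set to empty" distinguishable from "unset", which matters for
-// proto2 field presence:
-//
-// v := append(emptyBuf[:], b[:0]...) // v != nil, len(v) == 0
-// var unset []byte // unset == nil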
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := append(emptyBuf[:], b[:x]...)
- s := f.toBytesSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[y:], err
- }
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[y:], err
- }
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
- t := f.Type
- kt := t.Key()
- vt := t.Elem()
- unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
- unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // The map entry is a submessage. Figure out how big it is.
- if w != WireBytes {
- return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- r := b[x:] // unused data to return
- b = b[:x] // data for map entry
-
- // Note: we could use #keys * #values ~= 200 functions
- // to do map decoding without reflection. Probably not worth it.
- // Maps will be somewhat slow. Oh well.
-
- // Read key and value from data.
- var nerr nonFatal
- k := reflect.New(kt)
- v := reflect.New(vt)
- for len(b) > 0 {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- wire := int(x) & 7
- b = b[n:]
-
- var err error
- switch x >> 3 {
- case 1:
- b, err = unmarshalKey(b, valToPointer(k), wire)
- case 2:
- b, err = unmarshalVal(b, valToPointer(v), wire)
- default:
- err = errInternalBadWireType // skip unknown tag
- }
-
- if nerr.Merge(err) {
- continue
- }
- if err != errInternalBadWireType {
- return nil, err
- }
-
- // Skip past unknown fields.
- b, err = skipField(b, wire)
- if err != nil {
- return nil, err
- }
- }
-
- // Get map, allocate if needed.
- m := f.asPointerTo(t).Elem() // an addressable map[K]T
- if m.IsNil() {
- m.Set(reflect.MakeMap(t))
- }
-
- // Insert into map.
- m.SetMapIndex(k.Elem(), v.Elem())
-
- return r, nerr.E
- }
-}
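-
-// Editorial sketch (not part of the original file): each map entry is
-// a miniature message with the key on field 1 and the value on field
-// 2, which is what the switch above dispatches on. A map<string,int32>
-// entry {"a": 5} is the five-byte body:
-//
-// []byte{0x0a, 0x01, 0x61, 0x10, 0x05}
-// // key: tag 1 + "a"; value: tag 2 + 5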
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-// oneof F {
-// int64 X = 1;
-// float64 Y = 2;
-// }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
- sf := typ.Field(0)
- field0 := toField(&sf)
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // Allocate holder for value.
- v := reflect.New(typ)
-
- // Unmarshal data into holder.
- // We unmarshal into the first field of the holder object.
- var err error
- var nerr nonFatal
- b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if !nerr.Merge(err) {
- return nil, err
- }
-
- // Write pointer to holder into target field.
- f.asPointerTo(ityp).Elem().Set(v)
-
- return b, nerr.E
- }
-}
-
- // errInternalBadWireType is used internally by the decoder to signal a
- // wire-type mismatch; the unmarshal loop treats such fragments as unknown fields.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
- switch wire {
- case WireVarint:
- _, k := decodeVarint(b)
- if k == 0 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[k:]
- case WireFixed32:
- if len(b) < 4 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[4:]
- case WireFixed64:
- if len(b) < 8 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[8:]
- case WireBytes:
- m, k := decodeVarint(b)
- if k == 0 || uint64(len(b)-k) < m {
- return b, io.ErrUnexpectedEOF
- }
- b = b[uint64(k)+m:]
- case WireStartGroup:
- _, i := findEndGroup(b)
- if i == -1 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[i:]
- default:
- return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
- }
- return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
- depth := 1
- i := 0
- for {
- x, n := decodeVarint(b[i:])
- if n == 0 {
- return -1, -1
- }
- j := i
- i += n
- switch x & 7 {
- case WireVarint:
- _, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- case WireFixed32:
- if len(b)-4 < i {
- return -1, -1
- }
- i += 4
- case WireFixed64:
- if len(b)-8 < i {
- return -1, -1
- }
- i += 8
- case WireBytes:
- m, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- if uint64(len(b)-i) < m {
- return -1, -1
- }
- i += int(m)
- case WireStartGroup:
- depth++
- case WireEndGroup:
- depth--
- if depth == 0 {
- return j, i
- }
- default:
- return -1, -1
- }
- }
-}
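-
-// Editorial sketch (not part of the original file): for field number 1
-// the end-group tag encodes as (1<<3)|WireEndGroup = 0x0c, so given a
-// group body of one varint field:
-//
-// findEndGroup([]byte{0x08, 0x05, 0x0c}) // returns (2, 3):
-// // field 1 = varint 5, then the end-group tag at bytes [2:3]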
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
- for x >= 1<<7 {
- b = append(b, byte(x&0x7f|0x80))
- x >>= 7
- }
- return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
- var x, y uint64
- if len(b) == 0 {
- goto bad
- }
- x = uint64(b[0])
- if x < 0x80 {
- return x, 1
- }
- x -= 0x80
-
- if len(b) <= 1 {
- goto bad
- }
- y = uint64(b[1])
- x += y << 7
- if y < 0x80 {
- return x, 2
- }
- x -= 0x80 << 7
-
- if len(b) <= 2 {
- goto bad
- }
- y = uint64(b[2])
- x += y << 14
- if y < 0x80 {
- return x, 3
- }
- x -= 0x80 << 14
-
- if len(b) <= 3 {
- goto bad
- }
- y = uint64(b[3])
- x += y << 21
- if y < 0x80 {
- return x, 4
- }
- x -= 0x80 << 21
-
- if len(b) <= 4 {
- goto bad
- }
- y = uint64(b[4])
- x += y << 28
- if y < 0x80 {
- return x, 5
- }
- x -= 0x80 << 28
-
- if len(b) <= 5 {
- goto bad
- }
- y = uint64(b[5])
- x += y << 35
- if y < 0x80 {
- return x, 6
- }
- x -= 0x80 << 35
-
- if len(b) <= 6 {
- goto bad
- }
- y = uint64(b[6])
- x += y << 42
- if y < 0x80 {
- return x, 7
- }
- x -= 0x80 << 42
-
- if len(b) <= 7 {
- goto bad
- }
- y = uint64(b[7])
- x += y << 49
- if y < 0x80 {
- return x, 8
- }
- x -= 0x80 << 49
-
- if len(b) <= 8 {
- goto bad
- }
- y = uint64(b[8])
- x += y << 56
- if y < 0x80 {
- return x, 9
- }
- x -= 0x80 << 56
-
- if len(b) <= 9 {
- goto bad
- }
- y = uint64(b[9])
- x += y << 63
- if y < 2 {
- return x, 10
- }
-
-bad:
- return 0, 0
-}
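-
-// varintRoundTripSketch is an editorial sketch (not part of the
-// original file) pairing the two helpers above: every value
-// encodeVarint emits, decodeVarint reads back with the matching byte
-// count.
-func varintRoundTripSketch() {
- b := encodeVarint(nil, 300) // []byte{0xac, 0x02}
- x, n := decodeVarint(b) // x == 300, n == 2
- _, _ = x, n
-}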
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 1aaee725b..000000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,843 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-func requiresQuotes(u string) bool {
- // When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
- // It returns (false, nil) if the value in sv can't be unmarshaled
- // (e.g. because the message type it names is not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
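-
-// Editorial note (not part of the original file): when the type URL
-// resolves and the Value bytes unmarshal, the Any is rendered expanded
-// rather than as raw bytes; with a hypothetical registered type
-// example.Foo, multi-line output looks roughly like:
-//
-// [type.googleapis.com/example.Foo]: <
-//   name: "x"
-// >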
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if name == "XXX_NoUnkeyedLiteral" {
- continue
- }
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("<nil>\n")); err != nil {
- return err
- }
- continue
- }
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.MapValProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
-
- // Enums have a String method, so writeAny will work fine.
- if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv.Addr()
- if _, err := extendable(pv.Interface()); err == nil {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if v.CanAddr() {
- // Calling v.Interface on a struct causes the reflect package to
- // copy the entire struct. This is racy with the new Marshaler
- // since we atomically update the XXX_sizecache.
- //
- // Thus, we retrieve a pointer to the struct if possible to avoid
- // a race since v.Interface on the pointer doesn't copy the struct.
- //
- // If v is not addressable, then we are not worried about a race
- // since it implies that the binary Marshaler cannot possibly be
- // mutating this value.
- v = v.Addr()
- }
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, err := fmt.Fprintf(w, "/* %v */\n", err)
- return err
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, err := w.Write(endBraceNewline); err != nil {
- return err
- }
- continue
- }
- if _, err := fmt.Fprint(w, tag); err != nil {
- return err
- }
- if wire != WireStartGroup {
- if err := w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err = w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- ep, _ := extendable(pv.Interface())
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
- m, mu := ep.extensionsRead()
- if m == nil {
- return nil
- }
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(ep, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte("<nil>"))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 000000000..4a5931009
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
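// A minimal sketch of UnmarshalText in use, assuming the well-known
// wrapperspb types are linked into the build at a compatible version.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &wrapperspb.StringValue{}
	if err := proto.UnmarshalText(`value: "hello"`, m); err != nil {
		log.Fatal(err) // a *proto.ParseError carrying line/offset info
	}
	fmt.Println(m.GetValue()) // hello
}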
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
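// A sketch of the Any branch above: a bracketed type URL expands into
// type_url/value, assuming StringValue is registered via a blank import.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/wrapperspb" // registers StringValue
)

func main() {
	av := &anypb.Any{}
	err := proto.UnmarshalText(`[type.googleapis.com/google.protobuf.StringValue] <value: "hi">`, av)
	fmt.Println(av.GetTypeUrl(), err) // type.googleapis.com/google.protobuf.StringValue <nil>
}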
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
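// A sketch of the map branch, assuming the well-known structpb types:
// each entry is a <key ... value ...> message, and repeated entries
// accumulate into the map.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s := &structpb.Struct{}
	if err := proto.UnmarshalText(`fields: <key: "a" value: <string_value: "x">>`, s); err != nil {
		panic(err)
	}
	fmt.Println(s.GetFields()["a"].GetStringValue()) // x
}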
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers and uses
+ // two's-complement arithmetic to interpret them as negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers and uses
+ // two's-complement arithmetic to interpret them as negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
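// The hex fallback above in isolation: "0xffffffff" overflows a signed
// 32-bit parse, but the unsigned parse plus two's-complement negation
// recovers -1. Pure stdlib, so this sketch is self-contained.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseInt("0xffffffff", 0, 32)
	fmt.Println(err != nil) // true: out of range for int32

	x, _ := strconv.ParseUint("0xffffffff", 0, 32)
	fmt.Println(int32(-(int64(^x) + 1))) // -1
}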
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If the extension name or type URL is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
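// The concatenation loop above mirrors C/C++ string literals: adjacent
// quoted tokens merge. A sketch, assuming wrapperspb is available:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &wrapperspb.StringValue{}
	if err := proto.UnmarshalText(`value: "foo" "bar"`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetValue()) // foobar
}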
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil // i was range-checked against utf8.MaxRune above
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
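// unescape observed through the public API: octal, hex, and Unicode
// escapes all decode per the rules above. A sketch assuming wrapperspb:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &wrapperspb.StringValue{}
	// \101 is octal for 'A', \x42 is hex for 'B', \u0043 is 'C'.
	if err := proto.UnmarshalText(`value: "\101\x42\u0043"`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetValue()) // ABC
}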
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 000000000..a31134eeb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte("<nil>"), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
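// A sketch of these helpers, assuming wrapperspb: MarshalTextString
// emits one "name: value" line per field, while CompactTextString joins
// the fields with spaces on a single line.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hi")
	fmt.Print(proto.MarshalTextString(m))   // value: "hi"
	fmt.Println(proto.CompactTextString(m)) // value: "hi" (one line, space-separated)
}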
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if m can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when m was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
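// writeProto3Any from the outside: with ExpandAny set, a resolvable Any
// is printed inline. A sketch, assuming anypb and wrapperspb:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	av, err := anypb.New(wrapperspb.String("hi"))
	if err != nil {
		panic(err)
	}
	tm := proto.TextMarshaler{ExpandAny: true}
	fmt.Println(tm.Text(av))
	// Roughly:
	// [type.googleapis.com/google.protobuf.StringValue]: <
	//   value: "hi"
	// >
}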
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
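// writeQuotedString observed through the public API: printable ASCII and
// the named escapes pass through, every other byte becomes an octal
// escape. A sketch, assuming wrapperspb:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("tab\tand\x01byte")
	fmt.Print(proto.MarshalTextString(m)) // value: "tab\tand\001byte"
}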
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index bb55a3af2..000000000
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(i), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- field := sv.Field(oop.Field)
- if !field.IsNil() {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
- }
- field.Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.MapKeyProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.MapValProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
-
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- return um.UnmarshalText([]byte(s))
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- return newTextParser(s).readStruct(v.Elem(), "")
-}
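For reference, a minimal sketch of the text-format entry point defined above; the entry point itself remains part of the package after this refactor, only its implementation moves. The generated package pb and its message Foo (with a string field name) are hypothetical:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/foopb" // hypothetical generated package
)

func main() {
	// UnmarshalText resets msg first, then parses the text-format input.
	msg := &pb.Foo{}
	if err := proto.UnmarshalText(`name: "hello"`, msg); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(msg.GetName()) // hello
}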
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 000000000..d7c28da5a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
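A minimal round-trip sketch for the wire-format entry points above; the generated package pb and its proto3 message Foo are hypothetical:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/foopb" // hypothetical generated package (proto3)
)

func main() {
	in := &pb.Foo{Name: "hello"}

	// Size reports the encoded length without building the buffer;
	// Marshal produces the actual wire-format bytes.
	fmt.Println(proto.Size(in))
	b, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	// Unmarshal resets out before decoding; UnmarshalMerge would instead
	// merge into whatever out already holds.
	out := &pb.Foo{}
	if err := proto.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // hello
}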
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 000000000..398e34859
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
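These helpers exist because proto2 optional scalars are generated as pointer fields; a sketch, assuming a hypothetical proto2-generated package pb with a message Profile:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/profilepb" // hypothetical proto2-generated package
)

func main() {
	// Struct literals need pointers for optional scalars, so the helpers
	// above wrap plain values in freshly allocated pointers.
	p := &pb.Profile{
		Name:    proto.String("ada"),
		Age:     proto.Int32(36),
		Premium: proto.Bool(true),
	}
	// Generated getters dereference safely, returning zero values for nil.
	fmt.Println(p.GetName(), p.GetAge(), p.GetPremium())
}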
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 70276e8f5..e729dcff1 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -1,141 +1,165 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements functions to marshal proto.Message to/from
-// google.protobuf.Any message.
-
import (
"fmt"
- "reflect"
"strings"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
)
-const googleApis = "type.googleapis.com/"
+const urlPrefix = "type.googleapis.com/"
-// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
-//
-// Note that regular type assertions should be done using the Is
-// function. AnyMessageName is provided for less common use cases like filtering a
-// sequence of Any messages based on a set of allowed message type names.
-func AnyMessageName(any *any.Any) (string, error) {
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+ name, err := anyMessageName(any)
+ return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
- slash := strings.LastIndex(any.TypeUrl, "/")
- if slash < 0 {
+ name := protoreflect.FullName(any.TypeUrl)
+ if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
- return any.TypeUrl[slash+1:], nil
+ return name, nil
}
-// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
-func MarshalAny(pb proto.Message) (*any.Any, error) {
- value, err := proto.Marshal(pb)
+// MarshalAny marshals the given message m into an anypb.Any message.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+ switch dm := m.(type) {
+ case DynamicAny:
+ m = dm.Message
+ case *DynamicAny:
+ if dm == nil {
+ return nil, proto.ErrNil
+ }
+ m = dm.Message
+ }
+ b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
- return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in a google.protobuf.Any
-// message. The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-type DynamicAny struct {
- proto.Message
+ return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
-// Empty returns a new proto.Message of the type specified in a
-// google.protobuf.Any message. It returns an error if corresponding message
-// type isn't linked in.
-func Empty(any *any.Any) (proto.Message, error) {
- aname, err := AnyMessageName(any)
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+func Empty(any *anypb.Any) (proto.Message, error) {
+ name, err := anyMessageName(any)
if err != nil {
return nil, err
}
-
- t := proto.MessageType(aname)
- if t == nil {
- return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+ if err != nil {
+ return nil, err
}
- return reflect.New(t.Elem()).Interface().(proto.Message), nil
+ return proto.MessageV1(mt.New().Interface()), nil
}
-// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
-// message and places the decoded result in pb. It returns an error if type of
-// contents of Any message does not match type of pb message.
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
//
-// pb can be a proto.Message, or a *DynamicAny.
-func UnmarshalAny(any *any.Any, pb proto.Message) error {
- if d, ok := pb.(*DynamicAny); ok {
- if d.Message == nil {
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
var err error
- d.Message, err = Empty(any)
+ dm.Message, err = Empty(any)
if err != nil {
return err
}
}
- return UnmarshalAny(any, d.Message)
+ m = dm.Message
}
- aname, err := AnyMessageName(any)
+ anyName, err := AnyMessageName(any)
if err != nil {
return err
}
-
- mname := proto.MessageName(pb)
- if aname != mname {
- return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
- return proto.Unmarshal(any.Value, pb)
+ return proto.Unmarshal(any.Value, m)
}
-// Is returns true if any value contains a given message type.
-func Is(any *any.Any, pb proto.Message) bool {
- // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
- // but it avoids scanning TypeUrl for the slash.
- if any == nil {
+// Is reports whether the Any message contains a message of the specified type.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
return false
}
- name := proto.MessageName(pb)
- prefix := len(any.TypeUrl) - len(name)
- return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return "<nil>"
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+ return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 78ee52334..0ef27d33d 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,200 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/any.proto
+// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/any.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Any = anypb.Any
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-type Any struct {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
- // Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_b53526c13ae22eb4, []int{0}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
-func (*Any) XXX_WellKnownType() string { return "Any" }
-
-func (m *Any) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Any.Marshal(b, m, deterministic)
-}
-func (m *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(m, src)
-}
-func (m *Any) XXX_Size() int {
- return xxx_messageInfo_Any.Size(m)
-}
-func (m *Any) XXX_DiscardUnknown() {
- xxx_messageInfo_Any.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Any proto.InternalMessageInfo
-
-func (m *Any) GetTypeUrl() string {
- if m != nil {
- return m.TypeUrl
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+ return
}
- return ""
-}
-
-func (m *Any) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Any)(nil), "google.protobuf.Any")
-}
-
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-
-var fileDescriptor_b53526c13ae22eb4 = []byte{
- // 185 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
- 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
- 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
- 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
- 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
- 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
- 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
- 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
- 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
- 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
- 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+ file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}
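The regenerated file is now a forwarding shim: the type alias above makes the old and new Any types identical, so values flow between the two import paths without conversion while the single implementation lives in google.golang.org/protobuf. A sketch:

package main

import (
	"fmt"

	oldany "github.com/golang/protobuf/ptypes/any"
	anypb "google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// oldany.Any is a type alias for anypb.Any, so a *oldany.Any is
	// already a *anypb.Any; no conversion is involved.
	var a *anypb.Any = &oldany.Any{TypeUrl: "type.googleapis.com/google.protobuf.Duration"}
	fmt.Println(a.TypeUrl)
}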
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
deleted file mode 100644
index 493294255..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ /dev/null
@@ -1,154 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "github.com/golang/protobuf/ptypes/any";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "AnyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-message Any {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- string type_url = 1;
-
- // Must be a valid serialized protocol buffer of the above specified type.
- bytes value = 2;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index c0d595da7..fb9edd5c6 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -1,35 +1,6 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
-/*
-Package ptypes contains code for interacting with well-known types.
-*/
+// Package ptypes provides functionality for interacting with well-known types.
package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 26d1ca2fb..6110ae8a4 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -1,102 +1,72 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
import (
"errors"
"fmt"
"time"
- durpb "github.com/golang/protobuf/ptypes/duration"
+ durationpb "github.com/golang/protobuf/ptypes/duration"
)
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
const (
- // Range of a durpb.Duration in seconds, as specified in
- // google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
- if d == nil {
- return errors.New("duration: nil Duration")
- }
- if d.Seconds < minSeconds || d.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", d)
- }
- if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", d)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
- }
- return nil
-}
-
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
-// represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
- if err := validateDuration(p); err != nil {
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+ if err := validateDuration(dur); err != nil {
return 0, err
}
- d := time.Duration(p.Seconds) * time.Second
- if int64(d/time.Second) != p.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ d := time.Duration(dur.Seconds) * time.Second
+ if int64(d/time.Second) != dur.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
- if p.Nanos != 0 {
- d += time.Duration(p.Nanos) * time.Nanosecond
- if (d < 0) != (p.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ if dur.Nanos != 0 {
+ d += time.Duration(dur.Nanos) * time.Nanosecond
+ if (d < 0) != (dur.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a durationpb.Duration.
+func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
- return &durpb.Duration{
- Seconds: secs,
+ return &durationpb.Duration{
+		Seconds: secs,
Nanos: int32(nanos),
}
}
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+ if dur == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", dur)
+ }
+ if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", dur)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+ }
+ return nil
+}
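A round-trip sketch for the conversions above; only the standard library and this package are involved:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durationpb.Duration: split into seconds and nanos.
	d := 3*time.Second + time.Millisecond
	msg := ptypes.DurationProto(d) // Seconds: 3, Nanos: 1000000

	// durationpb.Duration -> time.Duration: validated, then recombined.
	// Errors arise only for nil, out-of-range, or sign-mismatched inputs.
	back, err := ptypes.Duration(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == d) // true
}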
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 0d681ee21..d0079ee3e 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,161 +1,63 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/duration.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Duration = durationpb.Duration
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-type Duration struct {
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-func (m *Duration) Reset() { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (*Duration) XXX_WellKnownType() string { return "Duration" }
-
-func (m *Duration) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Duration.Unmarshal(m, b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
- return xxx_messageInfo_Duration.Size(m)
-}
-func (m *Duration) XXX_DiscardUnknown() {
- xxx_messageInfo_Duration.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *Duration) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Duration) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
-}
-
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-
-var fileDescriptor_23597b2ebd7ac6c5 = []byte{
- // 190 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
- 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
- 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
- 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
- 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
- 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
- 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
- 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
- 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
- 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
- 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
deleted file mode 100644
index 975fce41a..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
+++ /dev/null
@@ -1,117 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/duration";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-message Duration {
-
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- int64 seconds = 1;
-
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- int32 nanos = 2;
-}
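The sign-normalization pseudo code in the deleted comment above maps one-to-one onto Go. A minimal sketch, with `normalize` as a purely illustrative helper (it is not part of any vendored package):

```go
package main

import "fmt"

// normalize adjusts a (seconds, nanos) pair so both fields share the same
// sign, mirroring the pseudo code from the removed duration.proto comment.
func normalize(seconds int64, nanos int32) (int64, int32) {
	if seconds < 0 && nanos > 0 {
		seconds++
		nanos -= 1e9
	} else if seconds > 0 && nanos < 0 {
		seconds--
		nanos += 1e9
	}
	return seconds, nanos
}

func main() {
	// end - start with end = (5s, 200ns) and start = (3s, 500ns) gives the
	// raw pair (2s, -300ns), which normalizes to (1s, 999999700ns).
	s, n := normalize(5-3, 200-500)
	fmt.Println(s, n) // 1 999999700
}
```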
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 8da0df01a..026d0d491 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -1,46 +1,18 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements operations on google.protobuf.Timestamp.
-
import (
"errors"
"fmt"
"time"
- tspb "github.com/golang/protobuf/ptypes/timestamp"
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@@ -50,44 +22,18 @@ const (
maxValidSeconds = 253402300800
)
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
-func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
@@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
-func TimestampNow() *tspb.Timestamp {
+func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
@@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
- ts := &tspb.Timestamp{
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+ ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
@@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
return ts, nil
}
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
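A short round trip through the converters defined in this file (every function used here appears in the diff above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> Timestamp; errors if the result falls outside
	// [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2020, 4, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	// Timestamp -> time.Time, always in the UTC locale.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}

	fmt.Println(t)                          // 2020-04-01 12:00:00 +0000 UTC
	fmt.Println(ptypes.TimestampString(ts)) // RFC 3339: 2020-04-01T12:00:00Z
}
```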
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 31cd846de..a76f80760 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,179 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/timestamp.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Timestamp = timestamppb.Timestamp
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-type Timestamp struct {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_292007bbfe81227e, []int{0}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
}
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
-
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Timestamp.Unmarshal(m, b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
- return xxx_messageInfo_Timestamp.Size(m)
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
- xxx_messageInfo_Timestamp.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *Timestamp) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Timestamp) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
-}
-
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-
-var fileDescriptor_292007bbfe81227e = []byte{
- // 191 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
- 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
- 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
- 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
- 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
- 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
- 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
- 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
- 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
- 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
- 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
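The regenerated file no longer declares its own message type: `type Timestamp = timestamppb.Timestamp` is a type alias, so the legacy and canonical import paths now name the same type. A minimal sketch of the interchangeability this buys:

```go
package main

import (
	"fmt"

	timestamp "github.com/golang/protobuf/ptypes/timestamp"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Thanks to the alias, a value built with the new package satisfies
	// any API still written against the old vendored path, and vice versa.
	var legacy *timestamp.Timestamp = &timestamppb.Timestamp{Seconds: 42}
	fmt.Println(legacy.Seconds) // 42
}
```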
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
deleted file mode 100644
index eafb3fa03..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/timestamp";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-message Timestamp {
-
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
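For reference, the deleted Example 1 (POSIX `time()`) translates to Go by filling the two message fields directly. A small sketch against the canonical timestamppb package; the field semantics are as documented in the removed comment:

```go
package main

import (
	"fmt"
	"time"

	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	now := time.Now()
	ts := &timestamppb.Timestamp{
		Seconds: now.Unix(),              // seconds since the Unix epoch
		Nanos:   int32(now.Nanosecond()), // non-negative fraction, [0, 1e9)
	}
	fmt.Println(ts.Seconds, ts.Nanos)
}
```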
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE
index 2bdc0d751..3909da410 100644
--- a/vendor/github.com/klauspost/pgzip/LICENSE
+++ b/vendor/github.com/klauspost/pgzip/LICENSE
@@ -1,4 +1,4 @@
-The MIT License (MIT)
+MIT License
Copyright (c) 2014 Klaus Post
@@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-
diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go
index cb3dc0896..257c4d299 100644
--- a/vendor/github.com/klauspost/pgzip/gzip.go
+++ b/vendor/github.com/klauspost/pgzip/gzip.go
@@ -405,7 +405,7 @@ func (z *Writer) Write(p []byte) (int, error) {
if len(z.currentBuffer) == z.blockSize {
z.compressCurrent(false)
if err := z.checkError(); err != nil {
- return len(p) - len(q) - length, err
+ return len(p) - len(q), err
}
}
z.size += length
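The hunk above fixes the byte count returned when a block fails mid-write: judging by the corrected expression, `q` has already been advanced past the current chunk at that point, so `len(p) - len(q)` is exactly the number of bytes consumed, and the extra `- length` under-reported it. For context, a hedged sketch of ordinary Writer usage (the block size and goroutine count are illustrative, not recommendations):

```go
package main

import (
	"bytes"
	"fmt"

	gzip "github.com/klauspost/pgzip"
)

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	// Compress in 128 KiB blocks on up to 4 goroutines.
	if err := w.SetConcurrency(128<<10, 4); err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, parallel gzip")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush and finalize the stream
		panic(err)
	}
	fmt.Println(buf.Len(), "compressed bytes")
}
```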
diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore
new file mode 100644
index 000000000..fa81aa93a
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/.gitignore
@@ -0,0 +1,2 @@
+.idea/
+.test/ \ No newline at end of file
diff --git a/vendor/github.com/nxadm/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml
new file mode 100644
index 000000000..95dd3bd78
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+script:
+ - go test -race -v ./...
+
+go:
+ - "1.9"
+ - "1.10"
+ - "1.11"
+ - "1.12"
+ - "1.13"
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md
new file mode 100644
index 000000000..ef1b5fbed
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/CHANGES.md
@@ -0,0 +1,46 @@
+# Version v1.4.4
+
+* Fix of checksum problem because of forced tag. No changes to the code.
+
+# Version v1.4.1
+
+* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".
+
+# Version v1.4.0
+
+* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".
+
+# Version v1.3.1
+
+* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
+fixes upstream issue 93.
+
+
+# Version v1.3.0
+
+* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
+to Line struct".
+
+# Version v1.2.1
+
+* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
+code in readme".
+* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
+to comment wording".
+* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
+spurious newlines from log messages".
+
+# Version v1.2.0
+
+* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
+ problem for never return the last line if it's not followed by a newline".
+* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
+deprecated os.SEEK consts". The changes bumped the minimal supported Go
+release to 1.9.
+
+# Version v1.1.0
+
+* Migration to Go modules.
+* Release of the master branch of the dormant upstream, because it contains
+fixes and improvements not present in the tagged release.
+
diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile
new file mode 100644
index 000000000..d9633891c
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang
+
+RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
+ADD . $GOPATH/src/github.com/nxadm/tail/
+
+# expecting to fetch dependencies successfully.
+RUN go get -v github.com/nxadm/tail
+
+# expecting to run the test successfully.
+RUN go test -v github.com/nxadm/tail
+
+# expecting to install successfully
+RUN go install -v github.com/nxadm/tail
+RUN go install -v github.com/nxadm/tail/cmd/gotail
+
+RUN $GOPATH/bin/gotail -h || true
+
+ENV PATH $GOPATH/bin:$PATH
+CMD ["gotail"]
diff --git a/vendor/github.com/nxadm/tail/LICENSE b/vendor/github.com/nxadm/tail/LICENSE
new file mode 100644
index 000000000..818d802a5
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/LICENSE
@@ -0,0 +1,21 @@
+# The MIT License (MIT)
+
+# © Copyright 2015 Hewlett Packard Enterprise Development LP
+Copyright (c) 2014 ActiveState
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md
new file mode 100644
index 000000000..dbb6c1727
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/README.md
@@ -0,0 +1,36 @@
+[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail)
+
+This repo is forked from the dormant upstream repo at
+[hpcloud](https://github.com/hpcloud/tail). This fork adds support for Go
+modules, updates the dependencies, adds features, and fixes bugs. Go 1.9 is
+the oldest supported compiler release.
+
+# Go package for tail-ing files
+
+A Go package striving to emulate the features of the BSD `tail` program.
+
+```Go
+t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
+if err != nil {
+ panic(err)
+}
+
+for line := range t.Lines {
+ fmt.Println(line.Text)
+}
+```
+
+See [API documentation](http://godoc.org/github.com/nxadm/tail).
+
+## Log rotation
+
+Tail comes with full support for truncation/move detection as it is
+designed to work with log rotation tools.
+
+## Installing
+
+ go get github.com/nxadm/tail/...
+
+## Windows support
+
+This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support.
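For the rotation case, a hedged sketch that survives renames and starts at the current end of the file; every field used here comes from the Config and SeekInfo types added later in this diff:

```go
package main

import (
	"fmt"
	"io"

	"github.com/nxadm/tail"
)

func main() {
	t, err := tail.TailFile("/var/log/nginx.log", tail.Config{
		Follow:   true, // keep reading as new lines arrive (tail -f)
		ReOpen:   true, // reopen after rotation/rename (tail -F)
		Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, // skip old content
	})
	if err != nil {
		panic(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```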
diff --git a/vendor/github.com/nxadm/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml
new file mode 100644
index 000000000..e149bc62d
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/appveyor.yml
@@ -0,0 +1,11 @@
+version: 0.{build}
+skip_tags: true
+cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
+build_script:
+- SET GOPATH=c:\workspace
+- go test -v -race ./...
+test: off
+clone_folder: c:\workspace\src\github.com\nxadm\tail
+branches:
+ only:
+ - master
diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod
new file mode 100644
index 000000000..fb10d42af
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/go.mod
@@ -0,0 +1,9 @@
+module github.com/nxadm/tail
+
+go 1.13
+
+require (
+ github.com/fsnotify/fsnotify v1.4.7
+ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
+)
diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum
new file mode 100644
index 000000000..b391f1904
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/go.sum
@@ -0,0 +1,6 @@
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence
new file mode 100644
index 000000000..434aab19f
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/Licence
@@ -0,0 +1,7 @@
+Copyright (C) 2013 99designs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
new file mode 100644
index 000000000..358b69e7f
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
@@ -0,0 +1,97 @@
+// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
+package ratelimiter
+
+import (
+ "time"
+)
+
+type LeakyBucket struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+ Now func() time.Time
+}
+
+func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: size,
+ Fill: 0,
+ LeakInterval: leakInterval,
+ Now: time.Now,
+ Lastupdate: time.Now(),
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucket) updateFill() {
+ now := b.Now()
+ if b.Fill > 0 {
+ elapsed := now.Sub(b.Lastupdate)
+
+ b.Fill -= float64(elapsed) / float64(b.LeakInterval)
+ if b.Fill < 0 {
+ b.Fill = 0
+ }
+ }
+ b.Lastupdate = now
+}
+
+func (b *LeakyBucket) Pour(amount uint16) bool {
+ b.updateFill()
+
+ var newfill float64 = b.Fill + float64(amount)
+
+ if newfill > float64(b.Size) {
+ return false
+ }
+
+ b.Fill = newfill
+
+ return true
+}
+
+// DrainedAt returns the time at which this bucket will be completely drained.
+func (b *LeakyBucket) DrainedAt() time.Time {
+ return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
+}
+
+// TimeToDrain returns the duration until this bucket is completely drained.
+func (b *LeakyBucket) TimeToDrain() time.Duration {
+ return b.DrainedAt().Sub(b.Now())
+}
+
+func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
+ return b.Now().Sub(b.Lastupdate)
+}
+
+type LeakyBucketSer struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+}
+
+func (b *LeakyBucket) Serialise() *LeakyBucketSer {
+ bucket := LeakyBucketSer{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ Now: time.Now,
+ }
+
+ return &bucket
+}
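A quick demonstration of the bucket semantics above: pours that would overflow the capacity are rejected, and the fill drains continuously at one unit per LeakInterval (the values here are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// Capacity of 10 units; one unit leaks away every 100ms.
	bucket := ratelimiter.NewLeakyBucket(10, 100*time.Millisecond)

	fmt.Println(bucket.Pour(8)) // true: fill is now 8
	fmt.Println(bucket.Pour(5)) // false: 8 + 5 would exceed capacity 10

	time.Sleep(300 * time.Millisecond) // ~3 units leak out
	fmt.Println(bucket.Pour(5))        // true: roughly 5 + 5 <= 10
}
```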
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go
new file mode 100644
index 000000000..bf3c2131b
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/memory.go
@@ -0,0 +1,60 @@
+package ratelimiter
+
+import (
+ "errors"
+ "time"
+)
+
+const (
+ GC_SIZE int = 100
+ GC_PERIOD time.Duration = 60 * time.Second
+)
+
+type Memory struct {
+ store map[string]LeakyBucket
+ lastGCCollected time.Time
+}
+
+func NewMemory() *Memory {
+ m := new(Memory)
+ m.store = make(map[string]LeakyBucket)
+ m.lastGCCollected = time.Now()
+ return m
+}
+
+func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
+
+ bucket, ok := m.store[key]
+ if !ok {
+ return nil, errors.New("miss")
+ }
+
+ return &bucket, nil
+}
+
+func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
+
+ if len(m.store) > GC_SIZE {
+ m.GarbageCollect()
+ }
+
+ m.store[key] = bucket
+
+ return nil
+}
+
+func (m *Memory) GarbageCollect() {
+ now := time.Now()
+
+ // rate limit GC to once per minute
+ if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
+ for key, bucket := range m.store {
+ // if the bucket is drained, then GC
+ if bucket.DrainedAt().Unix() < now.Unix() {
+ delete(m.store, key)
+ }
+ }
+
+ m.lastGCCollected = now
+ }
+}
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go
new file mode 100644
index 000000000..89b2fe882
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/ratelimiter/storage.go
@@ -0,0 +1,6 @@
+package ratelimiter
+
+type Storage interface {
+ GetBucketFor(string) (*LeakyBucket, error)
+ SetBucketFor(string, LeakyBucket) error
+}
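The Memory backend above is the only Storage implementation vendored here; a minimal sketch wiring the two together:

```go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	var store ratelimiter.Storage = ratelimiter.NewMemory()

	// Buckets are stored by value, keyed by an arbitrary string.
	bucket := ratelimiter.NewLeakyBucket(5, time.Second)
	if err := store.SetBucketFor("client-a", *bucket); err != nil {
		panic(err)
	}

	got, err := store.GetBucketFor("client-a") // errors with "miss" on unknown keys
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Pour(3)) // true: 3 <= capacity 5
}
```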
diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go
new file mode 100644
index 000000000..58d3c4b95
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail.go
@@ -0,0 +1,440 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package tail
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/nxadm/tail/ratelimiter"
+ "github.com/nxadm/tail/util"
+ "github.com/nxadm/tail/watch"
+ "gopkg.in/tomb.v1"
+)
+
+var (
+ ErrStop = errors.New("tail should now stop")
+)
+
+type Line struct {
+ Text string
+ Num int
+ SeekInfo SeekInfo
+ Time time.Time
+ Err error // Error from tail
+}
+
+// NewLine returns a Line with the current time.
+func NewLine(text string, lineNum int) *Line {
+ return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
+}
+
+// SeekInfo represents arguments to `io.Seek`
+type SeekInfo struct {
+ Offset int64
+ Whence int // io.Seek*
+}
+
+type logger interface {
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+ Fatalln(v ...interface{})
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+ Panicln(v ...interface{})
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// Config is used to specify how a file must be tailed.
+type Config struct {
+	// File-specific
+ Location *SeekInfo // Seek to this location before tailing
+ ReOpen bool // Reopen recreated files (tail -F)
+ MustExist bool // Fail early if the file does not exist
+ Poll bool // Poll for file changes instead of using inotify
+ Pipe bool // Is a named pipe (mkfifo)
+ RateLimiter *ratelimiter.LeakyBucket
+
+ // Generic IO
+ Follow bool // Continue looking for new lines (tail -f)
+ MaxLineSize int // If non-zero, split longer lines into multiple lines
+
+ // Logger, when nil, is set to tail.DefaultLogger
+ // To disable logging: set field to tail.DiscardingLogger
+ Logger logger
+}
+
+type Tail struct {
+ Filename string
+ Lines chan *Line
+ Config
+
+ file *os.File
+ reader *bufio.Reader
+ lineNum int
+
+ watcher watch.FileWatcher
+ changes *watch.FileChanges
+
+ tomb.Tomb // provides: Done, Kill, Dying
+
+ lk sync.Mutex
+}
+
+var (
+ // DefaultLogger is used when Config.Logger == nil
+ DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+ // DiscardingLogger can be used to disable logging output
+ DiscardingLogger = log.New(ioutil.Discard, "", 0)
+)
+
+// TailFile begins tailing the file. Output stream is made available
+// via the `Tail.Lines` channel. To handle errors during tailing,
+// invoke the `Wait` or `Err` method after finishing reading from the
+// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) {
+ if config.ReOpen && !config.Follow {
+ util.Fatal("cannot set ReOpen without Follow.")
+ }
+
+ t := &Tail{
+ Filename: filename,
+ Lines: make(chan *Line),
+ Config: config,
+ }
+
+ // when Logger was not specified in config, use default logger
+ if t.Logger == nil {
+ t.Logger = DefaultLogger
+ }
+
+ if t.Poll {
+ t.watcher = watch.NewPollingFileWatcher(filename)
+ } else {
+ t.watcher = watch.NewInotifyFileWatcher(filename)
+ }
+
+ if t.MustExist {
+ var err error
+ t.file, err = OpenFile(t.Filename)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ go t.tailFileSync()
+
+ return t, nil
+}
+
+// Tell returns the file's current position, like stdio's ftell().
+// The value is only approximate: a line may already have been read from
+// the tail.Lines channel, so the reported position can be off by one line.
+func (tail *Tail) Tell() (offset int64, err error) {
+ if tail.file == nil {
+ return
+ }
+ offset, err = tail.file.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return
+ }
+
+ tail.lk.Lock()
+ defer tail.lk.Unlock()
+ if tail.reader == nil {
+ return
+ }
+
+ offset -= int64(tail.reader.Buffered())
+ return
+}
+
+// Stop stops the tailing activity.
+func (tail *Tail) Stop() error {
+ tail.Kill(nil)
+ return tail.Wait()
+}
+
+// StopAtEOF stops tailing as soon as the end of the file is reached.
+func (tail *Tail) StopAtEOF() error {
+ tail.Kill(errStopAtEOF)
+ return tail.Wait()
+}
+
+var errStopAtEOF = errors.New("tail: stop at eof")
+
+func (tail *Tail) close() {
+ close(tail.Lines)
+ tail.closeFile()
+}
+
+func (tail *Tail) closeFile() {
+ if tail.file != nil {
+ tail.file.Close()
+ tail.file = nil
+ }
+}
+
+func (tail *Tail) reopen() error {
+ tail.closeFile()
+ tail.lineNum = 0
+ for {
+ var err error
+ tail.file, err = OpenFile(tail.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
+ if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
+ if err == tomb.ErrDying {
+ return err
+ }
+ return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
+ }
+ continue
+ }
+ return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
+ }
+ break
+ }
+ return nil
+}
+
+func (tail *Tail) readLine() (string, error) {
+ tail.lk.Lock()
+ line, err := tail.reader.ReadString('\n')
+ tail.lk.Unlock()
+ if err != nil {
+ // Note ReadString "returns the data read before the error" in
+ // case of an error, including EOF, so we return it as is. The
+ // caller is expected to process it if err is EOF.
+ return line, err
+ }
+
+ line = strings.TrimRight(line, "\n")
+
+ return line, err
+}
+
+func (tail *Tail) tailFileSync() {
+ defer tail.Done()
+ defer tail.close()
+
+ if !tail.MustExist {
+ // deferred first open.
+ err := tail.reopen()
+ if err != nil {
+ if err != tomb.ErrDying {
+ tail.Kill(err)
+ }
+ return
+ }
+ }
+
+ // Seek to requested location on first open of the file.
+ if tail.Location != nil {
+ _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
+ if err != nil {
+ tail.Killf("Seek error on %s: %s", tail.Filename, err)
+ return
+ }
+ }
+
+ tail.openReader()
+
+ // Read line by line.
+ for {
+ // do not seek in named pipes
+ if !tail.Pipe {
+ // grab the position in case we need to back up in the event of a half-line
+ if _, err := tail.Tell(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ line, err := tail.readLine()
+
+ // Process `line` even if err is EOF.
+ if err == nil {
+ cooloff := !tail.sendLine(line)
+ if cooloff {
+ // Wait a second before seeking till the end of
+ // file when rate limit is reached.
+				msg := "Too much log activity; waiting a second before resuming tailing"
+ offset, _ := tail.Tell()
+ tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
+ select {
+ case <-time.After(time.Second):
+ case <-tail.Dying():
+ return
+ }
+ if err := tail.seekEnd(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+ } else if err == io.EOF {
+ if !tail.Follow {
+ if line != "" {
+ tail.sendLine(line)
+ }
+ return
+ }
+
+ if tail.Follow && line != "" {
+ tail.sendLine(line)
+ if err := tail.seekEnd(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ // When EOF is reached, wait for more data to become
+ // available. Wait strategy is based on the `tail.watcher`
+ // implementation (inotify or polling).
+ err := tail.waitForChanges()
+ if err != nil {
+ if err != ErrStop {
+ tail.Kill(err)
+ }
+ return
+ }
+ } else {
+ // non-EOF error
+ tail.Killf("Error reading %s: %s", tail.Filename, err)
+ return
+ }
+
+ select {
+ case <-tail.Dying():
+ if tail.Err() == errStopAtEOF {
+ continue
+ }
+ return
+ default:
+ }
+ }
+}
+
+// waitForChanges waits until the file has been appended, deleted,
+// moved or truncated. When moved or deleted - the file will be
+// reopened if ReOpen is true. Truncated files are always reopened.
+func (tail *Tail) waitForChanges() error {
+ if tail.changes == nil {
+ pos, err := tail.file.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
+ if err != nil {
+ return err
+ }
+ }
+
+ select {
+ case <-tail.changes.Modified:
+ return nil
+ case <-tail.changes.Deleted:
+ tail.changes = nil
+ if tail.ReOpen {
+ // XXX: we must not log from a library.
+ tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened %s", tail.Filename)
+ tail.openReader()
+ return nil
+ }
+ tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
+ return ErrStop
+ case <-tail.changes.Truncated:
+ // Always reopen truncated files (Follow is true)
+ tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
+ tail.openReader()
+ return nil
+ case <-tail.Dying():
+ return ErrStop
+ }
+}
+
+func (tail *Tail) openReader() {
+ tail.lk.Lock()
+ if tail.MaxLineSize > 0 {
+ // add 2 to account for newline characters
+ tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
+ } else {
+ tail.reader = bufio.NewReader(tail.file)
+ }
+ tail.lk.Unlock()
+}
+
+func (tail *Tail) seekEnd() error {
+ return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
+}
+
+func (tail *Tail) seekTo(pos SeekInfo) error {
+ _, err := tail.file.Seek(pos.Offset, pos.Whence)
+ if err != nil {
+ return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
+ }
+ // Reset the read buffer whenever the file is re-seek'ed
+ tail.reader.Reset(tail.file)
+ return nil
+}
+
+// sendLine sends the line(s) to Lines channel, splitting longer lines
+// if necessary. Return false if rate limit is reached.
+func (tail *Tail) sendLine(line string) bool {
+ now := time.Now()
+ lines := []string{line}
+
+ // Split longer lines
+ if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
+ lines = util.PartitionString(line, tail.MaxLineSize)
+ }
+
+ for _, line := range lines {
+ tail.lineNum++
+ offset, _ := tail.Tell()
+ select {
+ case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
+ case <-tail.Dying():
+ return true
+ }
+ }
+
+ if tail.Config.RateLimiter != nil {
+ ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
+ if !ok {
+ tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
+ tail.Filename)
+ return false
+ }
+ }
+
+ return true
+}
+
+// Cleanup removes inotify watches added by the tail package. This function is
+// meant to be invoked from a process's exit handler, as the Linux kernel may
+// not automatically remove inotify watches after the process exits.
+func (tail *Tail) Cleanup() {
+ watch.Cleanup(tail.Filename)
+}
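Putting the pieces of this file together: a hedged sketch that reads a file once (no Follow), surfaces the stop reason via the embedded tomb's Wait, and drops inotify watches on exit as the Cleanup comment advises:

```go
package main

import (
	"fmt"

	"github.com/nxadm/tail"
)

func main() {
	t, err := tail.TailFile("/tmp/example.log", tail.Config{
		MustExist: true, // fail fast instead of waiting for the file to appear
	})
	if err != nil {
		panic(err)
	}
	defer t.Cleanup() // remove inotify watches; the kernel may not do it for us

	// Without Follow, Lines is closed once EOF is reached.
	for line := range t.Lines {
		fmt.Printf("%d: %s\n", line.Num, line.Text)
	}

	// Wait reports the error (if any) that stopped the tailing goroutine.
	if err := t.Wait(); err != nil {
		fmt.Println("tail stopped:", err)
	}
}
```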
diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go
new file mode 100644
index 000000000..1b94520ec
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail_posix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package tail
+
+import (
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return os.Open(name)
+}
diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go
new file mode 100644
index 000000000..4aaceea28
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/tail_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package tail
+
+import (
+ "github.com/nxadm/tail/winfile"
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return winfile.OpenFile(name, os.O_RDONLY, 0)
+}
diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go
new file mode 100644
index 000000000..2ba0ed71c
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/util/util.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package util
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "runtime/debug"
+)
+
+type Logger struct {
+ *log.Logger
+}
+
+var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
+
+// Fatal is like panic except it displays only the current goroutine's stack.
+func Fatal(format string, v ...interface{}) {
+ // https://github.com/nxadm/log/blob/master/log.go#L45
+ LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
+ os.Exit(1)
+}
+
+// PartitionString partitions the string into chunks of the given size,
+// with the last chunk of variable size.
+func PartitionString(s string, chunkSize int) []string {
+ if chunkSize <= 0 {
+ panic("invalid chunkSize")
+ }
+ length := len(s)
+ chunks := 1 + length/chunkSize
+ start := 0
+ end := chunkSize
+ parts := make([]string, 0, chunks)
+ for {
+ if end > length {
+ end = length
+ }
+ parts = append(parts, s[start:end])
+ if end == length {
+ break
+ }
+ start, end = end, end+chunkSize
+ }
+ return parts
+}
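A quick illustration of the chunking behavior, including the shorter final chunk:

```go
package main

import (
	"fmt"

	"github.com/nxadm/tail/util"
)

func main() {
	// 8 characters in chunks of 3: two full chunks plus a 2-character tail.
	fmt.Println(util.PartitionString("abcdefgh", 3)) // [abc def gh]
}
```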
diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go
new file mode 100644
index 000000000..f80aead9a
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/filechanges.go
@@ -0,0 +1,36 @@
+package watch
+
+type FileChanges struct {
+ Modified chan bool // Channel to get notified of modifications
+ Truncated chan bool // Channel to get notified of truncations
+ Deleted chan bool // Channel to get notified of deletions/renames
+}
+
+func NewFileChanges() *FileChanges {
+ return &FileChanges{
+ make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
+}
+
+func (fc *FileChanges) NotifyModified() {
+ sendOnlyIfEmpty(fc.Modified)
+}
+
+func (fc *FileChanges) NotifyTruncated() {
+ sendOnlyIfEmpty(fc.Truncated)
+}
+
+func (fc *FileChanges) NotifyDeleted() {
+ sendOnlyIfEmpty(fc.Deleted)
+}
+
+// sendOnlyIfEmpty sends on a bool channel only if the channel has no
+// backlog to be read by other goroutines. This concurrency pattern
+// can be used to notify other goroutines if and only if they are
+// looking for it (i.e., subsequent notifications can be compressed
+// into one).
+func sendOnlyIfEmpty(ch chan bool) {
+ select {
+ case ch <- true:
+ default:
+ }
+}
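The coalescing pattern in sendOnlyIfEmpty is generic Go; a standalone sketch showing how repeated notifications compress into a single pending signal:

```go
package main

import "fmt"

// notify copies the sendOnlyIfEmpty pattern above: the send succeeds only if
// the 1-buffered channel is empty, i.e. no notification is already pending.
func notify(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	ch := make(chan bool, 1)
	notify(ch)
	notify(ch) // dropped: a signal is already pending
	notify(ch) // dropped as well

	fmt.Println(len(ch)) // 1: three notifications collapsed into one
	<-ch
	fmt.Println(len(ch)) // 0
}
```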
diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go
new file mode 100644
index 000000000..439921810
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/inotify.go
@@ -0,0 +1,135 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/nxadm/tail/util"
+
+ "github.com/fsnotify/fsnotify"
+ "gopkg.in/tomb.v1"
+)
+
+// InotifyFileWatcher uses inotify to monitor file changes.
+type InotifyFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
+ fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
+ return fw
+}
+
+func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ err := WatchCreate(fw.Filename)
+ if err != nil {
+ return err
+ }
+ defer RemoveWatchCreate(fw.Filename)
+
+ // Do a real check now as the file might have been created before
+ // calling `WatchFlags` above.
+ if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
+ // file exists, or stat returned an error.
+ return err
+ }
+
+ events := Events(fw.Filename)
+
+ for {
+ select {
+ case evt, ok := <-events:
+ if !ok {
+ return fmt.Errorf("inotify watcher has been closed")
+ }
+ evtName, err := filepath.Abs(evt.Name)
+ if err != nil {
+ return err
+ }
+ fwFilename, err := filepath.Abs(fw.Filename)
+ if err != nil {
+ return err
+ }
+ if evtName == fwFilename {
+ return nil
+ }
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ err := Watch(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ fw.Size = pos
+
+ go func() {
+
+ events := Events(fw.Filename)
+
+ for {
+ prevSize := fw.Size
+
+ var evt fsnotify.Event
+ var ok bool
+
+ select {
+ case evt, ok = <-events:
+ if !ok {
+ RemoveWatch(fw.Filename)
+ return
+ }
+ case <-t.Dying():
+ RemoveWatch(fw.Filename)
+ return
+ }
+
+ switch {
+ case evt.Op&fsnotify.Remove == fsnotify.Remove:
+ fallthrough
+
+ case evt.Op&fsnotify.Rename == fsnotify.Rename:
+ RemoveWatch(fw.Filename)
+ changes.NotifyDeleted()
+ return
+
+ //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
+ case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
+ fallthrough
+
+ case evt.Op&fsnotify.Write == fsnotify.Write:
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ RemoveWatch(fw.Filename)
+ changes.NotifyDeleted()
+ return
+ }
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+ fw.Size = fi.Size()
+
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ } else {
+ changes.NotifyModified()
+ }
+ prevSize = fw.Size
+ }
+ }
+ }()
+
+ return changes, nil
+}
diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
new file mode 100644
index 000000000..a94bcd4cb
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
@@ -0,0 +1,248 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+
+ "github.com/nxadm/tail/util"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+type InotifyTracker struct {
+ mux sync.Mutex
+ watcher *fsnotify.Watcher
+ chans map[string]chan fsnotify.Event
+ done map[string]chan bool
+ watchNums map[string]int
+ watch chan *watchInfo
+ remove chan *watchInfo
+ error chan error
+}
+
+type watchInfo struct {
+ op fsnotify.Op
+ fname string
+}
+
+func (winfo *watchInfo) isCreate() bool {
+ return winfo.op == fsnotify.Create
+}
+
+var (
+ // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
+ shared *InotifyTracker
+
+ // these are used to ensure the shared InotifyTracker is run exactly once
+ once = sync.Once{}
+ goRun = func() {
+ shared = &InotifyTracker{
+ mux: sync.Mutex{},
+ chans: make(map[string]chan fsnotify.Event),
+ done: make(map[string]chan bool),
+ watchNums: make(map[string]int),
+ watch: make(chan *watchInfo),
+ remove: make(chan *watchInfo),
+ error: make(chan error),
+ }
+ go shared.run()
+ }
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+// Watch signals the run goroutine to begin watching the input filename
+func Watch(fname string) error {
+ return watch(&watchInfo{
+ fname: fname,
+ })
+}
+
+// WatchCreate signals the run goroutine to begin watching for creation of the input filename.
+// If you call WatchCreate, don't call Cleanup; call RemoveWatchCreate instead.
+func WatchCreate(fname string) error {
+ return watch(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func watch(winfo *watchInfo) error {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.watch <- winfo
+ return <-shared.error
+}
+
+// RemoveWatch signals the run goroutine to remove the watch for the input filename
+func RemoveWatch(fname string) error {
+ return remove(&watchInfo{
+ fname: fname,
+ })
+}
+
+// RemoveWatchCreate signals the run goroutine to remove the create-watch for the input filename
+func RemoveWatchCreate(fname string) error {
+ return remove(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func remove(winfo *watchInfo) error {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.mux.Lock()
+ done := shared.done[winfo.fname]
+ if done != nil {
+ delete(shared.done, winfo.fname)
+ close(done)
+ }
+ shared.mux.Unlock()
+
+ shared.remove <- winfo
+ return <-shared.error
+}
+
+// Events returns a channel to which fsnotify.Events corresponding to the input
+// filename will be sent. This channel will be closed when RemoveWatch is called
+// on this filename.
+func Events(fname string) <-chan fsnotify.Event {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ return shared.chans[fname]
+}
+
+// Cleanup removes the watch for the input filename if necessary.
+func Cleanup(fname string) error {
+ return RemoveWatch(fname)
+}
+
+// addWatch registers an fsnotify watch for the input filename, allocating the
+// event and done channels for it if necessary.
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ if shared.chans[winfo.fname] == nil {
+ shared.chans[winfo.fname] = make(chan fsnotify.Event)
+ }
+ if shared.done[winfo.fname] == nil {
+ shared.done[winfo.fname] = make(chan bool)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+
+ var err error
+ // Add the inotify watch only if the file isn't being watched already.
+ if shared.watchNums[fname] == 0 {
+ err = shared.watcher.Add(fname)
+ }
+ if err == nil {
+ shared.watchNums[fname]++
+ }
+ return err
+}
+
+// removeWatch removes the fsnotify watch for the input filename and closes the
+// corresponding events channel.
+func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
+ shared.mux.Lock()
+
+ ch := shared.chans[winfo.fname]
+ if ch != nil {
+ delete(shared.chans, winfo.fname)
+ close(ch)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+ shared.watchNums[fname]--
+ watchNum := shared.watchNums[fname]
+ if watchNum == 0 {
+ delete(shared.watchNums, fname)
+ }
+ shared.mux.Unlock()
+
+ var err error
+ // If we were the last ones to watch this file, unsubscribe from inotify.
+ // This needs to happen after releasing the lock because fsnotify waits
+ // synchronously for the kernel to acknowledge the removal of the watch
+ // for this file, which causes us to deadlock if we still held the lock.
+ if watchNum == 0 {
+ err = shared.watcher.Remove(fname)
+ }
+
+ return err
+}
+
+// sendEvent sends the input event to the appropriate Tail.
+func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
+ name := filepath.Clean(event.Name)
+
+ shared.mux.Lock()
+ ch := shared.chans[name]
+ done := shared.done[name]
+ shared.mux.Unlock()
+
+ if ch != nil && done != nil {
+ select {
+ case ch <- event:
+ case <-done:
+ }
+ }
+}
+
+// run is the loop, executed in its own goroutine, in which the shared struct reads
+// events from its Watcher's Events channel and sends them to the appropriate Tail.
+func (shared *InotifyTracker) run() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ util.Fatal("failed to create Watcher")
+ }
+ shared.watcher = watcher
+
+ for {
+ select {
+ case winfo := <-shared.watch:
+ shared.error <- shared.addWatch(winfo)
+
+ case winfo := <-shared.remove:
+ shared.error <- shared.removeWatch(winfo)
+
+ case event, open := <-shared.watcher.Events:
+ if !open {
+ return
+ }
+ shared.sendEvent(event)
+
+ case err, open := <-shared.watcher.Errors:
+ if !open {
+ return
+ } else if err != nil {
+ sysErr, ok := err.(*os.SyscallError)
+ if !ok || sysErr.Err != syscall.EINTR {
+ logger.Printf("Error in Watcher Error channel: %s", err)
+ }
+ }
+ }
+ }
+}
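
The tracker multiplexes every watched file onto one shared fsnotify.Watcher and demultiplexes events back out per filename. A short sketch of the package-level API under that model; the file name is hypothetical and must exist before Watch is called:

package main

import (
    "fmt"
    "path/filepath"

    "github.com/nxadm/tail/watch"
)

func main() {
    fname := filepath.Clean("./app.log") // hypothetical file; must already exist

    // Registers fname with the shared watcher (started lazily via sync.Once).
    if err := watch.Watch(fname); err != nil {
        panic(err)
    }

    // Print a few events, then unsubscribe. RemoveWatch closes the
    // channel that Events returned.
    events := watch.Events(fname)
    for i := 0; i < 3; i++ {
        evt, ok := <-events
        if !ok {
            return
        }
        fmt.Printf("op=%v name=%s\n", evt.Op, evt.Name)
    }
    watch.RemoveWatch(fname)
}
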
diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go
new file mode 100644
index 000000000..fb1706908
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/polling.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/nxadm/tail/util"
+ "gopkg.in/tomb.v1"
+)
+
+// PollingFileWatcher polls the file for changes.
+type PollingFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewPollingFileWatcher(filename string) *PollingFileWatcher {
+ fw := &PollingFileWatcher{filename, 0}
+ return fw
+}
+
+var POLL_DURATION time.Duration
+
+func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ for {
+ if _, err := os.Stat(fw.Filename); err == nil {
+ return nil
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ select {
+ case <-time.After(POLL_DURATION):
+ continue
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ origFi, err := os.Stat(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ var prevModTime time.Time
+
+ // XXX: use tomb.Tomb to cleanly manage these goroutines. replace
+ // the fatal (below) with tomb's Kill.
+
+ fw.Size = pos
+
+ go func() {
+ prevSize := fw.Size
+ for {
+ select {
+ case <-t.Dying():
+ return
+ default:
+ }
+
+ time.Sleep(POLL_DURATION)
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ // Windows cannot delete a file if a handle is still open (tail keeps one open)
+ // so it gives access denied to anything trying to read it until all handles are released.
+ if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
+ // File does not exist (has been deleted).
+ changes.NotifyDeleted()
+ return
+ }
+
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+
+ // File got moved/renamed?
+ if !os.SameFile(origFi, fi) {
+ changes.NotifyDeleted()
+ return
+ }
+
+ // File got truncated?
+ fw.Size = fi.Size()
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ prevSize = fw.Size
+ continue
+ }
+ // File got bigger?
+ if prevSize > 0 && prevSize < fw.Size {
+ changes.NotifyModified()
+ prevSize = fw.Size
+ continue
+ }
+ prevSize = fw.Size
+
+ // File was appended to (changed)?
+ modTime := fi.ModTime()
+ if modTime != prevModTime {
+ prevModTime = modTime
+ changes.NotifyModified()
+ }
+ }
+ }()
+
+ return changes, nil
+}
+
+func init() {
+ POLL_DURATION = 250 * time.Millisecond
+}
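
The poller's rename detection hinges on os.SameFile, which compares file identity (device and inode on Unix) rather than the path. A self-contained sketch of just that technique, using a hypothetical path and the same 250ms cadence as the POLL_DURATION default:

package main

import (
    "fmt"
    "os"
    "time"
)

func watchForRotation(path string) error {
    orig, err := os.Stat(path)
    if err != nil {
        return err
    }
    for {
        time.Sleep(250 * time.Millisecond) // mirrors the POLL_DURATION default above
        fi, err := os.Stat(path)
        if err != nil {
            if os.IsNotExist(err) {
                fmt.Println("file deleted")
                return nil
            }
            return err
        }
        // Same name, different identity (device/inode on Unix) means the
        // file was rotated out from under us.
        if !os.SameFile(orig, fi) {
            fmt.Println("file rotated")
            return nil
        }
    }
}

func main() {
    if err := watchForRotation("/var/log/app.log"); err != nil { // hypothetical path
        fmt.Println("error:", err)
    }
}
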
diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go
new file mode 100644
index 000000000..2e1783ef0
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/watch/watch.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import "gopkg.in/tomb.v1"
+
+// FileWatcher monitors file-level events.
+type FileWatcher interface {
+ // BlockUntilExists blocks until the file comes into existence.
+ BlockUntilExists(*tomb.Tomb) error
+
+// ChangeEvents reports on changes to a file: modification, deletion,
+// rename, or truncation. The returned FileChanges group of channels
+// will be closed, and thus become unusable, after a deletion or
+// truncation event.
+// In order to report truncations properly, ChangeEvents requires the
+// caller to pass in their current offset in the file.
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
+}
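
Both implementations above satisfy FileWatcher, so a caller can pick one at runtime. A sketch with a hypothetical newWatcher helper (not part of the package) that falls back to polling:

package main

import (
    "fmt"
    "runtime"

    "github.com/nxadm/tail/watch"
)

// newWatcher is a hypothetical helper, not part of the package: it falls back
// to polling where inotify-style notifications are unavailable or undesired.
func newWatcher(filename string, poll bool) watch.FileWatcher {
    if poll || runtime.GOOS == "windows" {
        return watch.NewPollingFileWatcher(filename)
    }
    return watch.NewInotifyFileWatcher(filename)
}

func main() {
    fw := newWatcher("/var/log/app.log", false)
    fmt.Printf("%T\n", fw)
}
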
diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go
new file mode 100644
index 000000000..aa7e7bc5d
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/winfile/winfile.go
@@ -0,0 +1,92 @@
+// +build windows
+
+package winfile
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// The issue is also described here:
+// https://codereview.appspot.com/8203043/
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
+func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_CREAT != 0 {
+ access |= syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_APPEND != 0 {
+ access &^= syscall.GENERIC_WRITE
+ access |= syscall.FILE_APPEND_DATA
+ }
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
+ var sa *syscall.SecurityAttributes
+ if mode&syscall.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
+ createmode = syscall.CREATE_NEW
+ case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
+ createmode = syscall.CREATE_ALWAYS
+ case mode&syscall.O_CREAT == syscall.O_CREAT:
+ createmode = syscall.OPEN_ALWAYS
+ case mode&syscall.O_TRUNC == syscall.O_TRUNC:
+ createmode = syscall.TRUNCATE_EXISTING
+ default:
+ createmode = syscall.OPEN_EXISTING
+ }
+ h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ return h, e
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
+func makeInheritSa() *syscall.SecurityAttributes {
+ var sa syscall.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
+func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
+ r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
+func syscallMode(i os.FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&os.ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&os.ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&os.ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
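
The point of this file is the share mode: CreateFile is passed FILE_SHARE_DELETE in addition to read/write sharing, which the standard library's open path on Windows historically omitted, so a tailed log can still be rotated or deleted by another process. A usage sketch with a hypothetical path:

// +build windows

package main

import (
    "fmt"
    "os"

    "github.com/nxadm/tail/winfile"
)

func main() {
    // The share mode includes FILE_SHARE_DELETE, so another process can
    // rotate or delete the log while this handle is still open.
    f, err := winfile.OpenFile(`C:\logs\app.log`, os.O_RDONLY, 0) // hypothetical path
    if err != nil {
        fmt.Println("open failed:", err)
        return
    }
    defer f.Close()
    fmt.Println("opened:", f.Name())
}
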
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 65dc3002b..079af2431 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,7 +1,7 @@
language: go
go:
- - 1.12.x
- 1.13.x
+ - 1.14.x
- tip
cache:
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 84b479404..52d8ca36d 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,27 @@
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo succesful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
## 1.12.0
### Features
@@ -208,7 +232,7 @@ New Features:
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes any spec failures that occured at the end of the test run.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index cdf8d054a..2fda7b9eb 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,29 +1,29 @@
-![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
-Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
## Feature List
-- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
+- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
-- Structure your BDD-style tests expressively:
- - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
+- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
+ - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
+ - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
+ - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
+ - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
+ - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- A comprehensive test runner that lets you:
- - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+ - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
+ - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+ - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+ - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)
-- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
 - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
- `ginkgo -cover` runs your tests using Go's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
@@ -37,27 +37,27 @@ If you have a question, comment, bug report, feature request, etc. please open a
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
-- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)
-- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
+- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
-- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.
- A modular architecture that lets you easily:
- - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+ - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+ - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
-## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
-Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
+Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)
-## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
+## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework
-Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
+Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)
## Set Me Up!
@@ -87,16 +87,16 @@ With that said, it's great to know what your options are :)
### What Go gives you out of the box
-Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
+Testing is a first-class citizen in Go; however, Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
### Matcher libraries for Go's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
- [testify](https://github.com/stretchr/testify)
-- [gocheck](http://labix.org/gocheck)
+- [gocheck](https://labix.org/gocheck)
-You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
+You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
### BDD style testing frameworks
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 949f8130f..342645022 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.12.0"
+const VERSION = "1.12.2"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
index f0eb375c3..2fddef0f7 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
@@ -57,8 +57,6 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
} else {
fmt.Printf(" compiled %s.test\n", suite.PackageName)
}
-
- runner.CleanUp()
}
if passed {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
index 5e00d5618..363e52fe2 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
@@ -33,7 +33,8 @@ func RewritePackage(packageName string) {
*/
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
- testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
+ testfile, _ := filepath.Abs(filepath.Join(pkg.Dir, file))
+ testfiles = append(testfiles, testfile)
}
dirFiles, err := ioutil.ReadDir(pkg.Dir)
@@ -103,10 +104,11 @@ func addGinkgoSuiteForPackage(pkg *build.Package) {
* Shells out to `go fmt` to format the package
*/
func goFmtPackage(pkg *build.Package) {
- output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
+ path, _ := filepath.Abs(pkg.ImportPath)
+ output, err := exec.Command("go", "fmt", path).CombinedOutput()
if err != nil {
- fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
+ fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", path, output, err.Error())
}
}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
index d415050ef..60c73504a 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
@@ -22,7 +22,7 @@ import (
*/
func rewriteTestsInFile(pathToFile string) {
fileSet := token.NewFileSet()
- rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
+ rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, parser.ParseComments)
if err != nil {
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
}
@@ -56,11 +56,12 @@ func rewriteTestsInFile(pathToFile string) {
}
fileInfo, err := os.Stat(pathToFile)
+
if err != nil {
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
}
- ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
+ err = ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
}
/*
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
index 1d06e08fd..f225d272f 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
@@ -179,19 +179,24 @@ func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) erro
for index, runner := range runners {
contents, err := ioutil.ReadFile(runner.CoverageFile)
+ if err != nil {
+ fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
+ return nil // non-fatal error
+ }
+
// remove the cover mode line from every file
// except the first one
if index > 0 {
contents = modeRegex.ReplaceAll(contents, []byte{})
}
- if err != nil {
- fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
- return nil // non-fatal error
- }
-
_, err = combined.Write(contents)
+ // Add a newline to the end of every file if missing.
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
+ _, err = combined.Write([]byte("\n"))
+ }
+
if err != nil {
fmt.Printf("Unable to append to coverprofile, %v\n", err)
return nil // non-fatal error
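
The reordered logic above reads each coverprofile, strips the mode line from all but the first, and appends a newline when one is missing. A self-contained sketch of that combining step, assuming a mode-line pattern equivalent to the runner's modeRegex:

package main

import (
    "bytes"
    "fmt"
    "regexp"
)

var modeRegex = regexp.MustCompile(`mode: [a-z]+\n`)

func combineCoverprofiles(profiles [][]byte) []byte {
    var combined bytes.Buffer
    for i, contents := range profiles {
        // Keep the "mode:" line only from the first profile.
        if i > 0 {
            contents = modeRegex.ReplaceAll(contents, []byte{})
        }
        combined.Write(contents)
        // Guarantee each chunk ends with a newline so records from
        // different files never fuse onto a single line.
        if len(contents) > 0 && contents[len(contents)-1] != '\n' {
            combined.WriteByte('\n')
        }
    }
    return combined.Bytes()
}

func main() {
    a := []byte("mode: atomic\npkg/foo.go:1.1,2.2 1 1") // note: no trailing newline
    b := []byte("mode: atomic\npkg/bar.go:3.3,4.4 1 0\n")
    fmt.Print(string(combineCoverprofiles([][]byte{a, b})))
}
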
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
index 80670d24a..66c0f06f6 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
@@ -49,11 +49,7 @@ func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout tim
}
if !suite.Precompiled {
- dir, err := ioutil.TempDir("", "ginkgo")
- if err != nil {
- panic(fmt.Sprintf("couldn't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
- }
- runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+ runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
}
return runner
@@ -248,7 +244,7 @@ func (t *TestRunner) CleanUp() {
if t.Suite.Precompiled {
return
}
- os.RemoveAll(filepath.Dir(t.compilationTargetPath))
+ os.Remove(t.compilationTargetPath)
}
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
index cedc2b59c..d9dfb6e44 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
@@ -1,11 +1,18 @@
package main
import (
+ "bytes"
"flag"
"fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
"io/ioutil"
- "os/exec"
+ "os"
+ "path/filepath"
"strings"
+ "sync"
)
func BuildUnfocusCommand() *Command {
@@ -22,40 +29,152 @@ func BuildUnfocusCommand() *Command {
}
func unfocusSpecs([]string, []string) {
- unfocus("Describe")
- unfocus("Context")
- unfocus("It")
- unfocus("Measure")
- unfocus("DescribeTable")
- unfocus("Entry")
- unfocus("Specify")
- unfocus("When")
-}
-
-func unfocus(component string) {
- fmt.Printf("Removing F%s...\n", component)
- files, err := ioutil.ReadDir(".")
+ fmt.Println("Scanning for focus...")
+
+ goFiles := make(chan string)
+ go func() {
+ unfocusDir(goFiles, ".")
+ close(goFiles)
+ }()
+
+ const workers = 10
+ wg := sync.WaitGroup{}
+ wg.Add(workers)
+
+ for i := 0; i < workers; i++ {
+ go func() {
+ for path := range goFiles {
+ unfocusFile(path)
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func unfocusDir(goFiles chan string, path string) {
+ files, err := ioutil.ReadDir(path)
if err != nil {
fmt.Println(err.Error())
return
}
+
for _, f := range files {
- // Exclude "vendor" directory
- if f.IsDir() && f.Name() == "vendor" {
- continue
+ switch {
+ case f.IsDir() && shouldProcessDir(f.Name()):
+ unfocusDir(goFiles, filepath.Join(path, f.Name()))
+ case !f.IsDir() && shouldProcessFile(f.Name()):
+ goFiles <- filepath.Join(path, f.Name())
}
- // Exclude non-go files in the current directory
- if !f.IsDir() && !strings.HasSuffix(f.Name(), ".go") {
- continue
+ }
+}
+
+func shouldProcessDir(basename string) bool {
+ return basename != "vendor" && !strings.HasPrefix(basename, ".")
+}
+
+func shouldProcessFile(basename string) bool {
+ return strings.HasSuffix(basename, ".go")
+}
+
+func unfocusFile(path string) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ fmt.Printf("error reading file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), 0)
+ if err != nil {
+ fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ eliminations := scanForFocus(ast)
+ if len(eliminations) == 0 {
+ return
+ }
+
+ fmt.Printf("...updating %s\n", path)
+ backup, err := writeBackup(path, data)
+ if err != nil {
+ fmt.Printf("error creating backup file: %s\n", err.Error())
+ return
+ }
+
+ if err := updateFile(path, data, eliminations); err != nil {
+ fmt.Printf("error writing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ os.Remove(backup)
+}
+
+func writeBackup(path string, data []byte) (string, error) {
+ t, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path))
+
+ if err != nil {
+ return "", fmt.Errorf("error creating temporary file: %w", err)
+ }
+ defer t.Close()
+
+ if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
+ return "", fmt.Errorf("error writing to temporary file: %w", err)
+ }
+
+ return t.Name(), nil
+}
+
+func updateFile(path string, data []byte, eliminations []int64) error {
+ to, err := os.Create(path)
+ if err != nil {
+ return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
+ }
+ defer to.Close()
+
+ from := bytes.NewReader(data)
+ var cursor int64
+ for _, byteToEliminate := range eliminations {
+ if _, err := io.CopyN(to, from, byteToEliminate-cursor); err != nil {
+ return fmt.Errorf("error copying data: %w", err)
}
- // Recursively run `gofmt` otherwise
- cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", f.Name())
- out, err := cmd.CombinedOutput()
- if err != nil {
- fmt.Println(err.Error())
+
+ cursor = byteToEliminate + 1
+
+ if _, err := from.Seek(1, io.SeekCurrent); err != nil {
+ return fmt.Errorf("error seeking to position in buffer: %w", err)
}
- if string(out) != "" {
- fmt.Println(string(out))
+ }
+
+ if _, err := io.Copy(to, from); err != nil {
+ return fmt.Errorf("error copying end data: %w", err)
+ }
+
+ return nil
+}
+
+func scanForFocus(file *ast.File) (eliminations []int64) {
+ ast.Inspect(file, func(n ast.Node) bool {
+ if c, ok := n.(*ast.CallExpr); ok {
+ if i, ok := c.Fun.(*ast.Ident); ok {
+ if isFocus(i.Name) {
+ eliminations = append(eliminations, int64(i.Pos()-file.Pos()))
+ }
+ }
}
+
+ return true
+ })
+
+ return eliminations
+}
+
+func isFocus(name string) bool {
+ switch name {
+ case "FDescribe", "FContext", "FIt", "FMeasure", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
+ return true
+ default:
+ return false
}
}
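
The rewritten unfocus walks the AST once per file, records the byte offset of each focused identifier, and rewrites the file skipping exactly one byte (the leading 'F') per hit, instead of shelling out to gofmt once per component. A standalone sketch of the scanning half, with a hypothetical inline source snippet:

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
)

var focused = map[string]bool{
    "FDescribe": true, "FContext": true, "FIt": true, "FMeasure": true,
    "FDescribeTable": true, "FEntry": true, "FSpecify": true, "FWhen": true,
}

func main() {
    src := `package x

func init() {
	FIt("runs only this spec", func() {})
}
`
    file, err := parser.ParseFile(token.NewFileSet(), "spec.go", src, 0)
    if err != nil {
        panic(err)
    }
    ast.Inspect(file, func(n ast.Node) bool {
        if c, ok := n.(*ast.CallExpr); ok {
            if i, ok := c.Fun.(*ast.Ident); ok && focused[i.Name] {
                // The offset of the leading 'F' relative to the start of
                // the file, matching what scanForFocus records; updateFile
                // then skips exactly that one byte.
                fmt.Println("focused call at byte offset", int64(i.Pos()-file.Pos()))
            }
        }
        return true
    })
}
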
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 15a4ab571..1f7125228 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -1,9 +1,11 @@
module github.com/onsi/ginkgo
require (
- github.com/hpcloud/tail v1.0.0
- github.com/onsi/gomega v1.7.1
- golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/nxadm/tail v1.4.4
+ github.com/onsi/gomega v1.10.1
+ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/text v0.3.2 // indirect
)
-go 1.12
+go 1.13
diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum
index 29adce41a..2b774f3e8 100644
--- a/vendor/github.com/onsi/ginkgo/go.sum
+++ b/vendor/github.com/onsi/ginkgo/go.sum
@@ -1,21 +1,60 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -24,3 +63,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
index f9ab30067..992437d9e 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -197,11 +197,11 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
- aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
- aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
} else {
- aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+ aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
}
case types.SpecStatePending:
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
index ab6622a29..774967db6 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
@@ -7,7 +7,8 @@ import (
"io/ioutil"
"os"
- "github.com/hpcloud/tail"
+ "github.com/nxadm/tail"
+ "golang.org/x/sys/unix"
)
func NewOutputInterceptor() OutputInterceptor {
@@ -35,12 +36,10 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
return err
}
- // Call a function in ./syscall_dup_*.go
- // If building for everything other than linux_arm64,
- // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
- // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
- syscallDup(int(interceptor.redirectFile.Fd()), 1)
- syscallDup(int(interceptor.redirectFile.Fd()), 2)
+ // This might call Dup3 if the dup2 syscall is not available, e.g. on
+ // linux/arm64 or linux/riscv64
+ unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
+ unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
if interceptor.streamTarget != nil {
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
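
The replacement relies on x/sys/unix.Dup2, which x/sys/unix provides across the platforms the removed shims covered (built on dup3 where the dup2 syscall is missing, per the comment above). A minimal sketch of the redirection trick itself, capturing stdout into a temp file:

// +build linux darwin

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    f, err := ioutil.TempFile("", "capture")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())

    // Point fd 1 (stdout) at the temp file; this also captures output from
    // C code or child processes that bypass os.Stdout.
    if err := unix.Dup2(int(f.Fd()), 1); err != nil {
        panic(err)
    }
    fmt.Println("this line lands in the temp file, not on the terminal")

    data, _ := ioutil.ReadFile(f.Name())
    os.Stderr.WriteString("captured: " + string(data))
}
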
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
deleted file mode 100644
index 9550d37b3..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,arm64
-
-package remote
-
-import "syscall"
-
-// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
deleted file mode 100644
index 0d40f0a54..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,riscv64
-
-package remote
-
-import "syscall"
-
-// linux_riscv64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
deleted file mode 100644
index 75ef7fb78..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build solaris
-
-package remote
-
-import "golang.org/x/sys/unix"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return unix.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
deleted file mode 100644
index 981aa7466..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux !arm64
-// +build !linux !riscv64
-// +build !windows
-// +build !solaris
-
-package remote
-
-import "syscall"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
index 3104bbc88..34f639ee4 100644
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
@@ -102,6 +102,9 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink
}
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
+ if !suite.running {
+ return nil, false
+ }
return suite.runner.CurrentSpecSummary()
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
index c76283b46..f0c9f6141 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
@@ -57,11 +57,11 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
- reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
+ reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
- reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
+ reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
} else {
- reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
+ reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
if reporter.config.ReportPassed {
reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
index 98854e7d9..1aa5b9db0 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
@@ -105,16 +105,16 @@ func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
stenographer.registerCall("AnnounceCapturedOutput", output)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSuccesfulSpec", spec)
+func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
+ stenographer.registerCall("AnnounceSuccessfulSpec", spec)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
+func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+ stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
}
-func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
+func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
+ stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
}
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
index 601c74d66..638d6fbb1 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
@@ -47,9 +47,9 @@ type Stenographer interface {
AnnounceCapturedOutput(output string)
- AnnounceSuccesfulSpec(spec *types.SpecSummary)
- AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
- AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
+ AnnounceSuccessfulSpec(spec *types.SpecSummary)
+ AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
+ AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
@@ -245,12 +245,12 @@ func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
s.midBlock()
}
-func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
+func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
s.print(0, s.colorize(greenColor, s.denoter))
s.stream()
}
-func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
"",
@@ -259,7 +259,7 @@ func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary,
)
}
-func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
+func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
s.measurementReport(spec, succinct),
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 35adfb8d7..3aafdbcfc 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.10.1
+
+### Fixes
+- Update dependencies (#389) [9f5eecd]
+
## 1.10.0
### Features
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 1eb0dfa68..778935141 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -1,17 +1,9 @@
module github.com/onsi/gomega
require (
- github.com/fsnotify/fsnotify v1.4.7 // indirect
- github.com/golang/protobuf v1.2.0
- github.com/hpcloud/tail v1.0.0 // indirect
- github.com/onsi/ginkgo v1.6.0
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect
- golang.org/x/text v0.3.0 // indirect
- golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
- gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.4
+ github.com/golang/protobuf v1.4.2
+ github.com/onsi/ginkgo v1.12.1
+ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+ gopkg.in/yaml.v2 v2.3.0
)
-
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index b872e8a0d..610b09bee 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -2,20 +2,52 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -24,3 +56,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 65e837e20..8ff9611d5 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.10.0"
+const GOMEGA_VERSION = "1.10.1"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
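For context on this bump: the nilFailHandlerPanic message above refers to the one-time wiring of Gomega's fail handler into Ginkgo. A minimal sketch of that setup, with hypothetical package and suite names:

```go
package mypkg_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Registering Ginkgo's Fail as Gomega's fail handler is what prevents
// the nil-fail-handler panic described above.
func TestMyPkg(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "MyPkg Suite")
}
```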
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
index ba611cb21..eaf8b2f9e 100644
--- a/vendor/github.com/opencontainers/go-digest/.mailmap
+++ b/vendor/github.com/opencontainers/go-digest/.mailmap
@@ -1 +1,4 @@
+Aaron Lehmann <aaronl@vitelus.com> <aaron.lehmann@docker.com>
+Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
+Haibing Zhou <zhouhaibing089@gmail.com>
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
index 45fa4b9ec..b6165f83c 100644
--- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
+++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
@@ -1,12 +1,28 @@
-approve_by_comment: true
-approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
-reject_regex: ^Rejected
-reset_on_push: true
-author_approval: ignored
-signed_off_by:
- required: true
-reviewers:
- teams:
- - go-digest-maintainers
- name: default
+version: 2
+
+requirements:
+ signed_off_by:
+ required: true
+
+always_pending:
+ title_regex: '^WIP'
+ explanation: 'Work in progress...'
+
+group_defaults:
required: 2
+ approve_by_comment:
+ enabled: true
+ approve_regex: '^LGTM'
+ reject_regex: '^Rejected'
+ reset_on_push:
+ enabled: true
+ author_approval:
+ ignored: true
+ conditions:
+ branches:
+ - master
+
+groups:
+ go-digest:
+ teams:
+ - go-digest-maintainers
diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml
index 7ea4ed1d2..5775f885c 100644
--- a/vendor/github.com/opencontainers/go-digest/.travis.yml
+++ b/vendor/github.com/opencontainers/go-digest/.travis.yml
@@ -1,4 +1,5 @@
language: go
go:
- - 1.7
+ - 1.12.x
+ - 1.13.x
- master
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE
index 0ea3ff81e..3ac8ab648 100644
--- a/vendor/github.com/opencontainers/go-digest/LICENSE.code
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE
@@ -176,6 +176,7 @@
END OF TERMS AND CONDITIONS
+ Copyright 2019, 2020 OCI Contributors
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
index 42a29795d..843b1b206 100644
--- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS
+++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
@@ -1,9 +1,5 @@
-Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
-Brandon Philips <brandon.philips@coreos.com> (@philips)
-Brendan Burns <bburns@microsoft.com> (@brendandburns)
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
-Jason Bouzane <jbouzane@google.com> (@jbouzane)
-John Starks <jostarks@microsoft.com> (@jstarks)
-Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
-Stephen Day <stephen.day@docker.com> (@stevvooe)
-Vincent Batts <vbatts@redhat.com> (@vbatts)
+Stephen Day <stevvooe@gmail.com> (@stevvooe)
+Vincent Batts <vbatts@hashbangbash.com> (@vbatts)
+Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> (@AkihiroSuda)
+Sebastiaan van Stijn <github@gone.nl> (@thaJeztah)
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
index 0f5a04092..a11287207 100644
--- a/vendor/github.com/opencontainers/go-digest/README.md
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -8,20 +8,16 @@ Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) fo
# What is a digest?
-A digest is just a hash.
+A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).
-The most common use case for a digest is to create a content
-identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
-systems:
+The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:
```go
id := digest.FromBytes([]byte("my content"))
```
-In the example above, the id can be used to uniquely identify
-the byte slice "my content". This allows two disparate applications
-to agree on a verifiable identifier without having to trust one
-another.
+In the example above, the id can be used to uniquely identify the byte slice "my content".
+This allows two disparate applications to agree on a verifiable identifier without having to trust one another.
An identifying digest can be verified, as follows:
@@ -31,8 +27,7 @@ if id != digest.FromBytes([]byte("my content")) {
}
```
-A `Verifier` type can be used to handle cases where an `io.Reader`
-makes more sense:
+A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
```go
rd := getContent()
@@ -44,33 +39,28 @@ if !verifier.Verified() {
}
```
-Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
-can power a rich, safe, content distribution system.
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
# Usage
-While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
-considered the best resource, a few important items need to be called
-out when using this package.
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
-1. Make sure to import the hash implementations into your application
- or the package will panic. You should have something like the
- following in the main (or other entrypoint) of your application:
+1. Make sure to import the hash implementations into your application or the package will panic.
+ You should have something like the following in the main (or other entrypoint) of your application:
```go
import (
_ "crypto/sha256"
- _ "crypto/sha512"
+ _ "crypto/sha512"
)
```
This may seem inconvenient but it allows you to replace the hash
implementations with others, such as https://github.com/stevvooe/resumable.
-2. Even though `digest.Digest` may be assemable as a string, _always_
- verify your input with `digest.Parse` or use `Digest.Validate`
- when accepting untrusted input. While there are measures to
- avoid common problems, this will ensure you have valid digests
- in the rest of your application.
+2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+ While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
# Stability
@@ -80,25 +70,27 @@ As always, before using a package export, read the [godoc](https://godoc.org/git
# Contributing
-This package is considered fairly complete. It has been in production
-in thousands (millions?) of deployments and is fairly battle-hardened.
-New additions will be met with skepticism. If you think there is a
-missing feature, please file a bug clearly describing the problem and
-the alternatives you tried before submitting a PR.
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
-# Reporting security issues
+## Code of Conduct
-Please DO NOT file a public issue, instead send your report privately to
-security@opencontainers.org.
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
-The maintainers take security seriously. If you discover a security issue,
-please bring it to their attention right away!
+## Security
-If you are reporting a security issue, do not create an issue or file a pull
-request on GitHub. Instead, disclose the issue responsibly by sending an email
-to security@opencontainers.org (which is inhabited only by the maintainers of
-the various OCI projects).
+If you find an issue, please follow the [security][security] protocol to report it.
# Copyright and license
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
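Taken together, the usage notes above compose into a short flow. A minimal sketch against the vendored go-digest API (illustration only, not part of the upstream change):

```go
package main

import (
	_ "crypto/sha256" // point 1 above: link in the hash implementation

	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	id := digest.FromBytes([]byte("my content"))

	// Point 2 above: validate any digest that arrives as a string.
	parsed, err := digest.Parse(id.String())
	if err != nil {
		panic(err)
	}

	// Verify a stream of content against the digest.
	verifier := parsed.Verifier()
	verifier.Write([]byte("my content"))
	fmt.Println(parsed, verifier.Verified()) // sha256:... true
}
```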
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
index 8813bd26f..490951dc3 100644
--- a/vendor/github.com/opencontainers/go-digest/algorithm.go
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
index ad398cba2..518b5e715 100644
--- a/vendor/github.com/opencontainers/go-digest/digest.go
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go
index 36fa2728e..ede907757 100644
--- a/vendor/github.com/opencontainers/go-digest/digester.go
+++ b/vendor/github.com/opencontainers/go-digest/digester.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go
index 491ea1ef1..83d3a936c 100644
--- a/vendor/github.com/opencontainers/go-digest/doc.go
+++ b/vendor/github.com/opencontainers/go-digest/doc.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -29,8 +30,13 @@
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
-// In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest".
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
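The algorithm/encoding split described in the new doc comment maps directly onto the Digest accessors; a minimal sketch using the example digest from the comment above:

```go
package main

import (
	_ "crypto/sha256" // sha256 must be linked in for validation

	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	d, err := digest.Parse("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm()) // "sha256": names both the hash and its (hex) encoding
	fmt.Println(d.Encoded())   // the hex-encoded digest portion

	// The two halves reassemble without re-parsing.
	rebuilt := digest.NewDigestFromEncoded(d.Algorithm(), d.Encoded())
	fmt.Println(rebuilt == d) // true
}
```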
diff --git a/vendor/github.com/opencontainers/go-digest/go.mod b/vendor/github.com/opencontainers/go-digest/go.mod
new file mode 100644
index 000000000..cf5d7b1d2
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/go.mod
@@ -0,0 +1,3 @@
+module github.com/opencontainers/go-digest
+
+go 1.13
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
index 32125e918..afef506f4 100644
--- a/vendor/github.com/opencontainers/go-digest/verifiers.go
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
index 48e621c99..7b60f8bb3 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -89,6 +89,8 @@ type User struct {
UID uint32 `json:"uid" platform:"linux,solaris"`
// GID is the group id.
GID uint32 `json:"gid" platform:"linux,solaris"`
+ // Umask is the umask for the init process.
+ Umask uint32 `json:"umask,omitempty" platform:"linux,solaris"`
// AdditionalGids are additional group ids set for the container's process.
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
// Username is the user name.
@@ -123,13 +125,26 @@ type Hook struct {
Timeout *int `json:"timeout,omitempty"`
}
+// Hooks specifies commands that are run at particular events in the lifecycle of a container
// Hooks for container setup and teardown
type Hooks struct {
- // Prestart is a list of hooks to be run before the container process is executed.
+ // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed.
+ // It is called in the Runtime Namespace
Prestart []Hook `json:"prestart,omitempty"`
+ // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
+ // It is called in the Runtime Namespace
+ CreateRuntime []Hook `json:"createRuntime,omitempty"`
+ // CreateContainer is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
+ // It is called in the Container Namespace
+ CreateContainer []Hook `json:"createContainer,omitempty"`
+ // StartContainer is a list of hooks to be run after the start operation is called but before the container process is started
+ // It is called in the Container Namespace
+ StartContainer []Hook `json:"startContainer,omitempty"`
// Poststart is a list of hooks to be run after the container process is started.
+ // It is called in the Runtime Namespace
Poststart []Hook `json:"poststart,omitempty"`
// Poststop is a list of hooks to be run after the container process exits.
+ // It is called in the Runtime Namespace
Poststop []Hook `json:"poststop,omitempty"`
}
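The new lifecycle hooks take the same Path/Args entries as the existing ones; a minimal sketch populating two of the new fields (the helper path is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	hooks := specs.Hooks{
		// Runs after creation, before pivot_root, in the runtime namespace.
		CreateRuntime: []specs.Hook{{
			Path: "/usr/local/bin/setup-netns", // hypothetical helper
			Args: []string{"setup-netns"},
		}},
		// Runs after start is called, before the container process runs,
		// in the container namespace.
		StartContainer: []specs.Hook{{Path: "/bin/true"}},
	}

	out, _ := json.Marshal(hooks)
	fmt.Println(string(out)) // {"createRuntime":[...],"startContainer":[...]}
}
```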
@@ -165,6 +180,8 @@ type Linux struct {
// IntelRdt contains Intel Resource Director Technology (RDT) information for
// handling resource constraints (e.g., L3 cache, memory bandwidth) for the container
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
+ // Personality contains configuration for the Linux personality syscall
+ Personality *LinuxPersonality `json:"personality,omitempty"`
}
// LinuxNamespace is the configuration for a Linux namespace
@@ -291,6 +308,8 @@ type LinuxMemory struct {
Swappiness *uint64 `json:"swappiness,omitempty"`
// DisableOOMKiller disables the OOM killer for out of memory conditions
DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
+ // Enables hierarchical memory accounting
+ UseHierarchy *bool `json:"useHierarchy,omitempty"`
}
// LinuxCPU for Linux cgroup 'cpu' resource management
@@ -387,6 +406,28 @@ type LinuxDeviceCgroup struct {
Access string `json:"access,omitempty"`
}
+// LinuxPersonalityDomain refers to a personality domain.
+type LinuxPersonalityDomain string
+
+// LinuxPersonalityFlag refers to an additional personality flag. None are currently defined.
+type LinuxPersonalityFlag string
+
+// Define domain and flags for Personality
+const (
+ // PerLinux is the standard Linux personality
+ PerLinux LinuxPersonalityDomain = "LINUX"
+ // PerLinux32 sets personality to 32 bit
+ PerLinux32 LinuxPersonalityDomain = "LINUX32"
+)
+
+// LinuxPersonality represents the Linux personality syscall input
+type LinuxPersonality struct {
+ // Domain for the personality
+ Domain LinuxPersonalityDomain `json:"domain"`
+ // Additional flags
+ Flags []LinuxPersonalityFlag `json:"flags,omitempty"`
+}
+
// Solaris contains platform-specific configuration for Solaris application containers.
type Solaris struct {
// SMF FMRI which should go "online" before we start the container process.
@@ -556,12 +597,16 @@ type VMImage struct {
type LinuxSeccomp struct {
DefaultAction LinuxSeccompAction `json:"defaultAction"`
Architectures []Arch `json:"architectures,omitempty"`
+ Flags []LinuxSeccompFlag `json:"flags,omitempty"`
Syscalls []LinuxSyscall `json:"syscalls,omitempty"`
}
// Arch used for additional architectures
type Arch string
+// LinuxSeccompFlag is a flag to pass to seccomp(2).
+type LinuxSeccompFlag string
+
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
@@ -595,6 +640,7 @@ const (
ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+ ActLog LinuxSeccompAction = "SCMP_ACT_LOG"
)
// LinuxSeccompOperator used to match syscall arguments in Seccomp
@@ -621,9 +667,10 @@ type LinuxSeccompArg struct {
// LinuxSyscall is used to match a syscall in Seccomp
type LinuxSyscall struct {
- Names []string `json:"names"`
- Action LinuxSeccompAction `json:"action"`
- Args []LinuxSeccompArg `json:"args,omitempty"`
+ Names []string `json:"names"`
+ Action LinuxSeccompAction `json:"action"`
+ ErrnoRet *uint `json:"errnoRet,omitempty"`
+ Args []LinuxSeccompArg `json:"args,omitempty"`
}
// LinuxIntelRdt has container runtime resource constraints for Intel RDT
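Pulling the remaining additions in this file together: a minimal sketch that populates the new Umask, Personality, and ErrnoRet fields and serializes them with the JSON tags shown above (the new Flags field on LinuxSeccomp is left unset; this is an illustration, not a complete runtime config):

```go
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	eperm := uint(1) // errnoRet overrides the default errno returned for SCMP_ACT_ERRNO

	cfg := struct {
		User        specs.User             `json:"user"`
		Personality specs.LinuxPersonality `json:"personality"`
		Seccomp     specs.LinuxSeccomp     `json:"seccomp"`
	}{
		User:        specs.User{UID: 0, GID: 0, Umask: 0022},
		Personality: specs.LinuxPersonality{Domain: specs.PerLinux32},
		Seccomp: specs.LinuxSeccomp{
			DefaultAction: specs.ActAllow,
			Syscalls: []specs.LinuxSyscall{{
				Names:    []string{"personality"},
				Action:   specs.ActErrno,
				ErrnoRet: &eperm,
			}},
		},
	}

	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}
```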
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
index b920fc1b3..596af0c2f 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 1
+ VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
deleted file mode 100644
index 114db5aec..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: clusteroperators.config.openshift.io
-spec:
- additionalPrinterColumns:
- - JSONPath: .status.versions[?(@.name=="operator")].version
- description: The version the operator is at.
- name: Version
- type: string
- - JSONPath: .status.conditions[?(@.type=="Available")].status
- description: Whether the operator is running and stable.
- name: Available
- type: string
- - JSONPath: .status.conditions[?(@.type=="Progressing")].status
- description: Whether the operator is processing changes.
- name: Progressing
- type: string
- - JSONPath: .status.conditions[?(@.type=="Degraded")].status
- description: Whether the operator is degraded.
- name: Degraded
- type: string
- - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
- description: The time the operator's Available status last changed.
- name: Since
- type: date
- group: config.openshift.io
- names:
- kind: ClusterOperator
- listKind: ClusterOperatorList
- plural: clusteroperators
- singular: clusteroperator
- shortNames:
- - co
- preserveUnknownFields: false
- scope: Cluster
- subresources:
- status: {}
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
- validation:
- openAPIV3Schema:
- description: ClusterOperator is the Custom Resource object which holds the current
- state of an operator. This object is used by operators to convey their state
- to the rest of the cluster.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds configuration that could apply to any operator.
- type: object
- status:
- description: status holds the information about the state of an operator. It
- is consistent with status information across the Kubernetes ecosystem.
- type: object
- properties:
- conditions:
- description: conditions describes the state of the operator's managed
- and monitored components.
- type: array
- items:
- description: ClusterOperatorStatusCondition represents the state of
- the operator's managed and monitored components.
- type: object
- required:
- - lastTransitionTime
- - status
- - type
- properties:
- lastTransitionTime:
- description: lastTransitionTime is the time of the last update
- to the current status property.
- type: string
- format: date-time
- message:
- description: message provides additional information about the
- current condition. This is only to be consumed by humans.
- type: string
- reason:
- description: reason is the CamelCase reason for the condition's
- current status.
- type: string
- status:
- description: status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: type specifies the aspect reported by this condition.
- type: string
- extension:
- description: extension contains any additional status information specific
- to the operator which owns this status object.
- type: object
- nullable: true
- x-kubernetes-preserve-unknown-fields: true
- relatedObjects:
- description: 'relatedObjects is a list of objects that are "interesting"
- or related to this operator. Common uses are: 1. the detailed resource
- driving the operator 2. operator namespaces 3. operand namespaces'
- type: array
- items:
- description: ObjectReference contains enough information to let you
- inspect or modify the referred object.
- type: object
- required:
- - group
- - name
- - resource
- properties:
- group:
- description: group of the referent.
- type: string
- name:
- description: name of the referent.
- type: string
- namespace:
- description: namespace of the referent.
- type: string
- resource:
- description: resource of the referent.
- type: string
- versions:
- description: versions is a slice of operator and operand version tuples. Operators
- which manage multiple operands will have multiple operand entries
- in the array. Available operators must report the version of the
- operator itself with the name "operator". An operator reports a new
- "operator" version when it has rolled out the new version to all of
- its operands.
- type: array
- items:
- type: object
- required:
- - name
- - version
- properties:
- name:
- description: name is the name of the particular operand this version
- is for. It usually matches container images, not operators.
- type: string
- version:
- description: version indicates which version of a particular operand
- is currently being managed. It must always match the Available
- operand. If 1.0.0 is Available, then this must indicate 1.0.0
- even if the operator is trying to rollout 1.1.0
- type: string
- versions:
- - name: v1
- served: true
- storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
deleted file mode 100644
index ccde0db23..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
+++ /dev/null
@@ -1,328 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: clusterversions.config.openshift.io
-spec:
- group: config.openshift.io
- versions:
- - name: v1
- served: true
- storage: true
- scope: Cluster
- subresources:
- status: {}
- names:
- plural: clusterversions
- singular: clusterversion
- kind: ClusterVersion
- preserveUnknownFields: false
- additionalPrinterColumns:
- - name: Version
- type: string
- JSONPath: .status.history[?(@.state=="Completed")].version
- - name: Available
- type: string
- JSONPath: .status.conditions[?(@.type=="Available")].status
- - name: Progressing
- type: string
- JSONPath: .status.conditions[?(@.type=="Progressing")].status
- - name: Since
- type: date
- JSONPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
- - name: Status
- type: string
- JSONPath: .status.conditions[?(@.type=="Progressing")].message
- validation:
- openAPIV3Schema:
- description: ClusterVersion is the configuration for the ClusterVersionOperator.
- This is where parameters related to automatic updates can be set.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec is the desired state of the cluster version - the operator
- will work to ensure that the desired version is applied to the cluster.
- type: object
- required:
- - clusterID
- properties:
- channel:
- description: channel is an identifier for explicitly requesting that
- a non-default set of updates be applied to this cluster. The default
- channel will be contain stable updates that are appropriate for production
- clusters.
- type: string
- clusterID:
- description: clusterID uniquely identifies this cluster. This is expected
- to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- in hexadecimal values). This is a required field.
- type: string
- desiredUpdate:
- description: "desiredUpdate is an optional field that indicates the
- desired value of the cluster version. Setting this value will trigger
- an upgrade (if the current version does not match the desired version).
- The set of recommended update values is listed as part of available
- updates in status, and setting values outside that range may cause
- the upgrade to fail. You may specify the version field without setting
- image if an update exists with that version in the availableUpdates
- or history. \n If an upgrade fails the operator will halt and report
- status about the failing component. Setting the desired update value
- back to the previous version will cause a rollback to be attempted.
- Not all rollbacks will succeed."
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on update.
- This option should only be used when the authenticity of the provided
- image has been verified out of band because the provided image
- will run with full administrative access to the cluster. Do not
- use this flag with images that comes from unknown or potentially
- malicious sources. \n This flag does not override other forms
- of consistency checking that are required before a new update
- is deployed."
- type: boolean
- image:
- description: image is a container image location that contains the
- update. When this field is part of spec, image is optional if
- version is specified and the availableUpdates field contains a
- matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the update
- version. When this field is part of spec, version is optional
- if image is specified.
- type: string
- overrides:
- description: overrides is list of overides for components that are managed
- by cluster version operator. Marking a component unmanaged will prevent
- the operator from creating or updating the object.
- type: array
- items:
- description: ComponentOverride allows overriding cluster version operator's
- behavior for a component.
- type: object
- required:
- - group
- - kind
- - name
- - namespace
- - unmanaged
- properties:
- group:
- description: group identifies the API group that the kind is in.
- type: string
- kind:
- description: kind indentifies which object to override.
- type: string
- name:
- description: name is the component's name.
- type: string
- namespace:
- description: namespace is the component's namespace. If the resource
- is cluster scoped, the namespace should be empty.
- type: string
- unmanaged:
- description: 'unmanaged controls if cluster version operator should
- stop managing the resources in this cluster. Default: false'
- type: boolean
- upstream:
- description: upstream may be used to specify the preferred update server.
- By default it will use the appropriate update server for the cluster
- and region.
- type: string
- status:
- description: status contains information about the available updates and
- any in-progress updates.
- type: object
- required:
- - availableUpdates
- - desired
- - observedGeneration
- - versionHash
- properties:
- availableUpdates:
- description: availableUpdates contains the list of updates that are
- appropriate for this cluster. This list may be empty if no updates
- are recommended, if the update service is unavailable, or if an invalid
- channel has been specified.
- type: array
- items:
- description: Update represents a release of the ClusterVersionOperator,
- referenced by the Image member.
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on
- update. This option should only be used when the authenticity
- of the provided image has been verified out of band because
- the provided image will run with full administrative access
- to the cluster. Do not use this flag with images that comes
- from unknown or potentially malicious sources. \n This flag
- does not override other forms of consistency checking that are
- required before a new update is deployed."
- type: boolean
- image:
- description: image is a container image location that contains
- the update. When this field is part of spec, image is optional
- if version is specified and the availableUpdates field contains
- a matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the
- update version. When this field is part of spec, version is
- optional if image is specified.
- type: string
- nullable: true
- conditions:
- description: conditions provides information about the cluster version.
- The condition "Available" is set to true if the desiredUpdate has
- been reached. The condition "Progressing" is set to true if an update
- is being applied. The condition "Degraded" is set to true if an update
- is currently blocked by a temporary or permanent error. Conditions
- are only valid for the current desiredUpdate when metadata.generation
- is equal to status.generation.
- type: array
- items:
- description: ClusterOperatorStatusCondition represents the state of
- the operator's managed and monitored components.
- type: object
- required:
- - lastTransitionTime
- - status
- - type
- properties:
- lastTransitionTime:
- description: lastTransitionTime is the time of the last update
- to the current status property.
- type: string
- format: date-time
- message:
- description: message provides additional information about the
- current condition. This is only to be consumed by humans.
- type: string
- reason:
- description: reason is the CamelCase reason for the condition's
- current status.
- type: string
- status:
- description: status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: type specifies the aspect reported by this condition.
- type: string
- desired:
- description: desired is the version that the cluster is reconciling
- towards. If the cluster is not yet fully initialized desired will
- be set with the information available, which may be an image or a
- tag.
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on update.
- This option should only be used when the authenticity of the provided
- image has been verified out of band because the provided image
- will run with full administrative access to the cluster. Do not
- use this flag with images that comes from unknown or potentially
- malicious sources. \n This flag does not override other forms
- of consistency checking that are required before a new update
- is deployed."
- type: boolean
- image:
- description: image is a container image location that contains the
- update. When this field is part of spec, image is optional if
- version is specified and the availableUpdates field contains a
- matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the update
- version. When this field is part of spec, version is optional
- if image is specified.
- type: string
- history:
- description: history contains a list of the most recent versions applied
- to the cluster. This value may be empty during cluster startup, and
- then will be updated when a new update is being applied. The newest
- update is first in the list and it is ordered by recency. Updates
- in the history have state Completed if the rollout completed - if
- an update was failing or halfway applied the state will be Partial.
- Only a limited amount of update history is preserved.
- type: array
- items:
- description: UpdateHistory is a single attempted update to the cluster.
- type: object
- required:
- - completionTime
- - image
- - startedTime
- - state
- - verified
- properties:
- completionTime:
- description: completionTime, if set, is when the update was fully
- applied. The update that is currently being applied will have
- a null completion time. Completion time will always be set for
- entries that are not the current update (usually to the started
- time of the next update).
- type: string
- format: date-time
- nullable: true
- image:
- description: image is a container image location that contains
- the update. This value is always populated.
- type: string
- startedTime:
- description: startedTime is the time at which the update was started.
- type: string
- format: date-time
- state:
- description: state reflects whether the update was fully applied.
- The Partial state indicates the update is not fully applied,
- while the Completed state indicates the update was successfully
- rolled out at least once (all parts of the update successfully
- applied).
- type: string
- verified:
- description: verified indicates whether the provided update was
- properly verified before it was installed. If this is false
- the cluster may not be trusted.
- type: boolean
- version:
- description: version is a semantic versioning identifying the
- update version. If the requested image does not define a version,
- or if a failure occurs retrieving the image, this value may
- be empty.
- type: string
- observedGeneration:
- description: observedGeneration reports which version of the spec is
- being synced. If this value is not equal to metadata.generation, then
- the desired and conditions fields may represent a previous version.
- type: integer
- format: int64
- versionHash:
- description: versionHash is a fingerprint of the content that the cluster
- will be updated with. It is used by the operator to avoid unnecessary
- work and is for internal use only.
- type: string
- versions:
- - name: v1
- served: true
- storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
deleted file mode 100644
index 8c857d45a..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: operatorhubs.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: OperatorHub
- listKind: OperatorHubList
- plural: operatorhubs
- singular: operatorhub
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: OperatorHub is the Schema for the operatorhubs API. It can be used
- to change the state of the default hub sources for OperatorHub on the cluster
- from enabled to disabled and vice versa.
- type: object
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: OperatorHubSpec defines the desired state of OperatorHub
- type: object
- properties:
- disableAllDefaultSources:
- description: disableAllDefaultSources allows you to disable all the
- default hub sources. If this is true, a specific entry in sources
- can be used to enable a default source. If this is false, a specific
- entry in sources can be used to disable or enable a default source.
- type: boolean
- sources:
- description: sources is the list of default hub sources and their configuration.
- If the list is empty, it implies that the default hub sources are
- enabled on the cluster unless disableAllDefaultSources is true. If
- disableAllDefaultSources is true and sources is not empty, the configuration
- present in sources will take precedence. The list of default hub sources
- and their current state will always be reflected in the status block.
- type: array
- items:
- description: HubSource is used to specify the hub source and its configuration
- type: object
- properties:
- disabled:
- description: disabled is used to disable a default hub source
- on cluster
- type: boolean
- name:
- description: name is the name of one of the default hub sources
- type: string
- maxLength: 253
- minLength: 1
- status:
- description: OperatorHubStatus defines the observed state of OperatorHub.
- The current state of the default hub sources will always be reflected
- here.
- type: object
- properties:
- sources:
- description: sources encapsulates the result of applying the configuration
- for each hub source
- type: array
- items:
- description: HubSourceStatus is used to reflect the current state
- of applying the configuration to a default source
- type: object
- properties:
- disabled:
- description: disabled is used to disable a default hub source
- on cluster
- type: boolean
- message:
- description: message provides more information regarding failures
- type: string
- name:
- description: name is the name of one of the default hub sources
- type: string
- maxLength: 253
- minLength: 1
- status:
- description: status indicates success or failure in applying the
- configuration
- type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
deleted file mode 100644
index afd076747..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: proxies.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- names:
- kind: Proxy
- listKind: ProxyList
- plural: proxies
- singular: proxy
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Proxy holds cluster-wide information on how to configure default
- proxies for the cluster. The canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec holds user-settable values for the proxy configuration
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames and/or CIDRs
- for which the proxy should not be used. Empty means unset and will
- not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used to verify
- readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing a CA
- certificate bundle used for client egress HTTPS connections. The certificate
- bundle must be from the CA that signed the proxy's certificate and
- be signed for everything. The trustedCA field should only be consumed
- by a proxy validator. The validator is responsible for reading the
- certificate bundle from required key \"ca-bundle.crt\" and copying
- it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in yaml): \n
- apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace:
- openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE-----
- \ Custom CA certificate bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames and/or CIDRs
- for which the proxy should not be used.
- type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
deleted file mode 100644
index 4e1fdac37..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
+++ /dev/null
@@ -1,219 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: apiservers.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: APIServer
- singular: apiserver
- plural: apiservers
- listKind: APIServerList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: APIServer holds configuration (like serving certificates, client
- CA and CORS domains) shared by all API servers in the system, among them especially
- kube-apiserver and openshift-apiserver. The canonical name of an instance
- is 'cluster'.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- type: object
- properties:
- additionalCORSAllowedOrigins:
- description: additionalCORSAllowedOrigins lists additional, user-defined
- regular expressions describing hosts for which the API server allows
- access using the CORS headers. This may be needed to access the API
- and the integrated OAuth server from JavaScript applications. The
- values are regular expressions that correspond to the Golang regular
- expression language.
- type: array
- items:
- type: string
- clientCA:
- description: 'clientCA references a ConfigMap containing a certificate
- bundle for the signers that will be recognized for incoming client
- certificates in addition to the operator managed signers. If this
- is empty, then only operator managed signers are valid. You usually
- only have to set this if you have your own PKI you wish to honor client
- certificates from. The ConfigMap must exist in the openshift-config
- namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
- - CA bundle.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- encryption:
- description: encryption allows the configuration of encryption of resources
- at the datastore layer.
- type: object
- properties:
- type:
- description: "type defines what encryption type should be used to
- encrypt resources at the datastore layer. When this field is unset
- (i.e. when it is set to the empty string), identity is implied.
- The behavior of unset can and will change over time. Even if
- encryption is enabled by default, the meaning of unset may change
- to a different encryption type based on changes in best practices.
- \n When encryption is enabled, all sensitive resources shipped
- with the platform are encrypted. This list of sensitive resources
- can and will change over time. The current authoritative list
- is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
- \ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
- type: string
- enum:
- - ""
- - identity
- - aescbc
- servingCerts:
- description: servingCert is the TLS cert info for serving secure traffic.
- If not specified, operator managed certificates will be used for serving
- secure traffic.
- type: object
- properties:
- namedCertificates:
- description: namedCertificates references secrets containing the
- TLS cert info for serving secure traffic to specific hostnames.
- If no named certificates are provided, or no named certificates
- match the server name as understood by a client, the defaultServingCertificate
- will be used.
- type: array
- items:
- description: APIServerNamedServingCert maps a server DNS name,
- as understood by a client, to a certificate.
- type: object
- properties:
- names:
- description: names is a optional list of explicit DNS names
- (leading wildcards allowed) that should use this certificate
- to serve secure traffic. If no names are provided, the implicit
- names will be extracted from the certificates. Exact names
- trump over wildcard names. Explicit names defined here trump
- over extracted implicit names.
- type: array
- items:
- type: string
- servingCertificate:
- description: 'servingCertificate references a kubernetes.io/tls
- type secret containing the TLS cert info for serving secure
- traffic. The secret must exist in the openshift-config namespace
- and contain the following required fields: - Secret.Data["tls.key"]
- - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsSecurityProfile:
- description: "tlsSecurityProfile specifies settings for TLS connections
- for externally exposed servers. \n If unset, a default (which may
- change between releases) is chosen. Note that only Old and Intermediate
- profiles are currently supported, and the maximum available MinTLSVersions
- is VersionTLS12."
- type: object
- properties:
- custom:
- description: "custom is a user-defined TLS security profile. Be
- extremely careful using a custom profile as invalid configurations
- can be catastrophic. An example custom profile looks like this:
- \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
- \ minTLSVersion: TLSv1.1"
- type: object
- properties:
- ciphers:
- description: "ciphers is used to specify the cipher algorithms
- that are negotiated during the TLS handshake. Operators may
- remove entries their operands do not support. For example,
- to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
- type: array
- items:
- type: string
- minTLSVersion:
- description: "minTLSVersion is used to specify the minimal version
- of the TLS protocol that is negotiated during the TLS handshake.
- For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):
- \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest
- minTLSVersion allowed is VersionTLS12"
- type: string
- nullable: true
- intermediate:
- description: "intermediate is a TLS security profile based on: \n
- https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
- \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
- \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
- \ minTLSVersion: TLSv1.2"
- type: object
- nullable: true
- modern:
- description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
- type: object
- nullable: true
- old:
- description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
- \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
- \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
- \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
- \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
- ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
- \ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA -
- DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256
- \ - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256
- \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion:
- TLSv1.0"
- type: object
- nullable: true
- type:
- description: "type is one of Old, Intermediate, Modern or Custom.
- Custom provides the ability to specify individual TLS security
- profile parameters. Old, Intermediate and Modern are TLS security
- profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
- \n The profiles are intent based, so they may change over time
- as new ciphers are developed and existing ciphers are found to
- be insecure. Depending on precisely which ciphers are available
- to a process, the list may be reduced. \n Note that the Modern
- profile is currently not supported because it is not yet well
- adopted by common software libraries."
- type: string
- status:
- type: object
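For reference, a custom tlsSecurityProfile stanza matching the schema above might look like the sketch below (a fragment only, since the owning resource's header lies outside this hunk; cipher names and the TLSv1.1 spelling follow the embedded description examples):

tlsSecurityProfile:
  type: Custom
  custom:
    # cipher names taken from the description's example list
    ciphers:
      - ECDHE-ECDSA-CHACHA20-POLY1305
      - ECDHE-RSA-AES128-GCM-SHA256
    # per the NOTE above, the highest allowed minimum is TLS 1.2
    minTLSVersion: TLSv1.1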
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
deleted file mode 100644
index f21ac7ea8..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: authentications.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Authentication
- listKind: AuthenticationList
- plural: authentications
- singular: authentication
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Authentication specifies cluster-wide settings for authentication
- (like OAuth and webhook token authenticators). The canonical name of an instance
- is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- oauthMetadata:
- description: 'oauthMetadata contains the discovery endpoint data for
- OAuth 2.0 Authorization Server Metadata for an external OAuth server.
- This discovery document can be viewed from its served location: oc
- get --raw ''/.well-known/oauth-authorization-server'' For further
- details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- If oauthMetadata.name is non-empty, this value has precedence over
- any metadata reference stored in status. The key "oauthMetadata" is
- used to locate the data. If specified and the config map or expected
- key is not found, no metadata is served. If the specified metadata
- is not valid, no metadata is served. The namespace for this config
- map is openshift-config.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- type:
- description: type identifies the cluster managed, user facing authentication
- mode in use. Specifically, it manages the component that responds
- to login attempts. The default is IntegratedOAuth.
- type: string
- webhookTokenAuthenticators:
- description: webhookTokenAuthenticators configures remote token reviewers.
- These remote authentication webhooks can be used to verify bearer
- tokens via the tokenreviews.authentication.k8s.io REST API. This
- is required to honor bearer tokens that are provisioned by an external
- authentication service. The namespace for these secrets is openshift-config.
- type: array
- items:
- description: webhookTokenAuthenticator holds the necessary configuration
- options for a remote token authenticator
- type: object
- properties:
- kubeConfig:
- description: 'kubeConfig contains kube config file data which
- describes how to access the remote webhook service. For further
- details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
- The key "kubeConfig" is used to locate the data. If the secret
- or expected key is not found, the webhook is not honored. If
- the specified kube config data is not valid, the webhook is
- not honored. The namespace for this secret is determined by
- the point of use.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- integratedOAuthMetadata:
- description: 'integratedOAuthMetadata contains the discovery endpoint
- data for OAuth 2.0 Authorization Server Metadata for the in-cluster
- integrated OAuth server. This discovery document can be viewed from
- its served location: oc get --raw ''/.well-known/oauth-authorization-server''
- For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- This contains the observed value based on cluster state. An explicitly
- set value in spec.oauthMetadata has precedence over this field. This
- field has no meaning if authentication spec.type is not set to IntegratedOAuth.
- The key "oauthMetadata" is used to locate the data. If the config
- map or expected key is not found, no metadata is served. If the specified
- metadata is not valid, no metadata is served. The namespace for this
- config map is openshift-config-managed.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
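A minimal Authentication CR satisfying the deleted schema might look like this sketch (the config map and secret names are hypothetical and would live in openshift-config):

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: IntegratedOAuth
  oauthMetadata:
    name: oauth-meta              # hypothetical config map in openshift-config
  webhookTokenAuthenticators:
    - kubeConfig:
        name: webhook-kubeconfig  # hypothetical secret holding kube config data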
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
deleted file mode 100644
index 8f7583971..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
+++ /dev/null
@@ -1,366 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: builds.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Build
- singular: build
- plural: builds
- listKind: BuildList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: "Build configures the behavior of OpenShift builds for the entire
- cluster. This includes default settings that can be overridden in BuildConfig
- objects, and overrides which are applied to all builds. \n The canonical name
- is \"cluster\""
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec holds user-settable values for the build controller configuration
- type: object
- properties:
- additionalTrustedCA:
- description: "AdditionalTrustedCA is a reference to a ConfigMap containing
- additional CAs that should be trusted for image pushes and pulls during
- builds. The namespace for this config map is openshift-config. \n
- DEPRECATED: Additional CAs for image pull and push should be set on
- image.config.openshift.io/cluster instead."
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- buildDefaults:
- description: BuildDefaults controls the default information for Builds
- type: object
- properties:
- defaultProxy:
- description: "DefaultProxy contains the default proxy settings for
- all build operations, including image pull/push and source download.
- \n Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`,
- and `NO_PROXY` environment variables in the build config's strategy."
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames
- and/or CIDRs for which the proxy should not be used. Empty
- means unset and will not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used
- to verify readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing
- a CA certificate bundle used for client egress HTTPS connections.
- The certificate bundle must be from the CA that signed the
- proxy's certificate and be signed for everything. The trustedCA
- field should only be consumed by a proxy validator. The validator
- is responsible for reading the certificate bundle from required
- key \"ca-bundle.crt\" and copying it to a ConfigMap named
- \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in
- yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
- user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
- | -----BEGIN CERTIFICATE----- Custom CA certificate
- bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- env:
- description: Env is a set of default environment variables that
- will be applied to the build if the specified variables do not
- exist on the build
- type: array
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- type: object
- required:
- - name
- properties:
- name:
- description: Name of the environment variable. Must be a C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a variable
- cannot be resolved, the reference in the input string will
- be unchanged. The $(VAR_NAME) syntax can be escaped with
- a double $$, i.e. $$(VAR_NAME). Escaped references will never
- be expanded, regardless of whether the variable exists or
- not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- type: object
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- type: object
- required:
- - key
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, metadata.labels, metadata.annotations,
- spec.nodeName, spec.serviceAccountName, status.hostIP,
- status.podIP, status.podIPs.'
- type: object
- required:
- - fieldPath
- properties:
- apiVersion:
- description: Version of the schema the FieldPath is
- written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the specified
- API version.
- type: string
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- type: object
- required:
- - resource
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- description: Specifies the output format of the exposed
- resources, defaults to "1"
- type: string
- resource:
- description: 'Required: resource to select'
- type: string
- secretKeyRef:
- description: Selects a key of a secret in the pod's namespace
- type: object
- required:
- - key
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- gitProxy:
- description: "GitProxy contains the proxy settings for git operations
- only. If set, this will override any Proxy settings for all git
- commands, such as git clone. \n Values that are not set here will
- be inherited from DefaultProxy."
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames
- and/or CIDRs for which the proxy should not be used. Empty
- means unset and will not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used
- to verify readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing
- a CA certificate bundle used for client egress HTTPS connections.
- The certificate bundle must be from the CA that signed the
- proxy's certificate and be signed for everything. The trustedCA
- field should only be consumed by a proxy validator. The validator
- is responsible for reading the certificate bundle from required
- key \"ca-bundle.crt\" and copying it to a ConfigMap named
- \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in
- yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
- user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
- | -----BEGIN CERTIFICATE----- Custom CA certificate
- bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- imageLabels:
- description: ImageLabels is a list of docker labels that are applied
- to the resulting image. Users can override a default label by providing
- a label with the same name in their Build/BuildConfig.
- type: array
- items:
- type: object
- properties:
- name:
- description: Name defines the name of the label. It must have
- non-zero length.
- type: string
- value:
- description: Value defines the literal value of the label.
- type: string
- resources:
- description: Resources defines resource requirements to execute
- the build.
- type: object
- properties:
- limits:
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- additionalProperties:
- type: string
- requests:
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- additionalProperties:
- type: string
- buildOverrides:
- description: BuildOverrides controls override settings for builds
- type: object
- properties:
- imageLabels:
- description: ImageLabels is a list of docker labels that are applied
- to the resulting image. If a user provides a label in their Build/BuildConfig
- with the same name as one in this list, the user's label will
- be overwritten.
- type: array
- items:
- type: object
- properties:
- name:
- description: Name defines the name of the label. It must have
- non-zero length.
- type: string
- value:
- description: Value defines the literal value of the label.
- type: string
- nodeSelector:
- description: NodeSelector is a selector which must be true for the
- build pod to fit on a node
- type: object
- additionalProperties:
- type: string
- tolerations:
- description: Tolerations is a list of Tolerations that will override
- any existing tolerations set on a build pod.
- type: array
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using the
- matching operator <operator>.
- type: object
- properties:
- effect:
- description: Effect indicates the taint effect to match. Empty
- means match all taint effects. When specified, allowed values
- are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to the
- value. Valid operators are Exists and Equal. Defaults to
- Equal. Exists is equivalent to wildcard for value, so that
- a pod can tolerate all taints of a particular category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of time
- the toleration (which must be of effect NoExecute, otherwise
- this field is ignored) tolerates the taint. By default,
- it is not set, which means tolerate the taint forever (do
- not evict). Zero and negative values will be treated as
- 0 (evict immediately) by the system.
- type: integer
- format: int64
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
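A Build CR exercising the main knobs of this schema could look like the following sketch (the proxy URL, env var, label, and taint key are all illustrative):

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128   # illustrative proxy URL
      noProxy: .example.com
    env:
      - name: EXAMPLE_VAR          # applied only if the build does not set it
        value: "1"
    imageLabels:
      - name: vendor
        value: Example
  buildOverrides:
    nodeSelector:
      kubernetes.io/os: linux
    tolerations:
      - key: builds-only           # illustrative taint key
        operator: Exists
        effect: NoSchedule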
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
deleted file mode 100644
index b527f7aa3..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: consoles.config.openshift.io
-spec:
- scope: Cluster
- preserveUnknownFields: false
- group: config.openshift.io
- names:
- kind: Console
- listKind: ConsoleList
- plural: consoles
- singular: console
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Console holds cluster-wide configuration for the web console, including
- the logout URL, and reports the public URL of the console. The canonical name
- is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- authentication:
- description: ConsoleAuthentication defines a list of optional configuration
- for console authentication.
- type: object
- properties:
- logoutRedirect:
- description: 'An optional, absolute URL to redirect web browsers
- to after logging out of the console. If not specified, it will
- redirect to the default login page. This is required when using
- an identity provider that supports single sign-on (SSO) such as:
- - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML)
- - OAuth (GitHub, GitLab, Google) Logging out of the console will
- destroy the user''s token. The logoutRedirect provides the user
- the option to perform single logout (SLO) through the identity
- provider to destroy their single sign-on session.'
- type: string
- pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- consoleURL:
- description: The URL for the console. This will be derived from the
- host for the route that is created for the console.
- type: string
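A minimal Console CR per the schema above might look like this (the URL is illustrative and must match the https-only pattern on logoutRedirect):

apiVersion: config.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  authentication:
    logoutRedirect: https://sso.example.com/logout   # illustrative SSO logout URL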
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
deleted file mode 100644
index c883ee0f0..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: dnses.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: DNS
- listKind: DNSList
- plural: dnses
- singular: dns
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: DNS holds cluster-wide information about DNS. The canonical name
- is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- baseDomain:
- description: "baseDomain is the base domain of the cluster. All managed
- DNS records will be sub-domains of this base. \n For example, given
- the base domain `openshift.example.com`, an API server DNS record
- may be created for `cluster-api.openshift.example.com`. \n Once set,
- this field cannot be changed."
- type: string
- privateZone:
- description: "privateZone is the location where all the DNS records
- that are only available internally to the cluster exist. \n If this
- field is nil, no private records should be created. \n Once set, this
- field cannot be changed."
- type: object
- properties:
- id:
- description: "id is the identifier that can be used to find the
- DNS hosted zone. \n on AWS, zone can be fetched using `ID` as id
- in [1]; on Azure, zone can be fetched using `ID` as a pre-determined
- name in [2]; on GCP, zone can be fetched using `ID` as a pre-determined
- name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
- type: string
- tags:
- description: "tags can be used to query the DNS hosted zone. \n
- on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
- using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
- type: object
- additionalProperties:
- type: string
- publicZone:
- description: "publicZone is the location where all the DNS records that
- are publicly accessible to the internet exist. \n If this field is
- nil, no public records should be created. \n Once set, this field
- cannot be changed."
- type: object
- properties:
- id:
- description: "id is the identifier that can be used to find the
- DNS hosted zone. \n on AWS, zone can be fetched using `ID` as id
- in [1]; on Azure, zone can be fetched using `ID` as a pre-determined
- name in [2]; on GCP, zone can be fetched using `ID` as a pre-determined
- name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
- type: string
- tags:
- description: "tags can be used to query the DNS hosted zone. \n
- on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
- using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
- type: object
- additionalProperties:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
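A DNS CR following this schema might look like the sketch below (the domain, zone ID, and tag are illustrative):

apiVersion: config.openshift.io/v1
kind: DNS
metadata:
  name: cluster
spec:
  baseDomain: openshift.example.com
  publicZone:
    id: Z2EXAMPLEZONE           # illustrative hosted-zone ID, per [1] above
  privateZone:
    tags:
      Name: mycluster-int       # illustrative tag filter, per the tags description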
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
deleted file mode 100644
index 89084a33f..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: featuregates.config.openshift.io
-spec:
- group: config.openshift.io
- version: v1
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: FeatureGate
- singular: featuregate
- plural: featuregates
- listKind: FeatureGateList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: FeatureGate holds cluster-wide information about feature gates. The
- canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- customNoUpgrade:
- description: customNoUpgrade allows the enabling or disabling of any
- feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE,
- and PREVENTS UPGRADES. Because of its nature, this setting cannot
- be validated. If you have any typos or accidentally apply invalid
- combinations, your cluster may fail in an unrecoverable way. featureSet
- must equal "CustomNoUpgrade" to use this field.
- type: object
- properties:
- disabled:
- description: disabled is a list of all feature gates that you want
- to force off
- type: array
- items:
- type: string
- enabled:
- description: enabled is a list of all feature gates that you want
- to force on
- type: array
- items:
- type: string
- nullable: true
- featureSet:
- description: featureSet changes the list of features in the cluster. The
- default is empty. Be very careful adjusting this setting. Turning
- on or off features may cause irreversible changes in your cluster
- which cannot be undone.
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
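A FeatureGate CR using the CustomNoUpgrade escape hatch described above might look like this sketch (gate names are hypothetical; as the schema warns, this setting cannot be validated or undone):

apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: CustomNoUpgrade    # required for customNoUpgrade to take effect
  customNoUpgrade:
    enabled:
      - ExampleAlphaGate         # hypothetical gate name
    disabled:
      - ExampleBetaGate          # hypothetical gate name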
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
deleted file mode 100644
index a0fd48709..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
+++ /dev/null
@@ -1,144 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: images.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Image
- singular: image
- plural: images
- listKind: ImageList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Image governs policies related to imagestream imports and runtime
- configuration for external registries. It allows cluster admins to configure
- which registries OpenShift is allowed to import images from, extra CA trust
- bundles for external registries, and policies to blacklist/whitelist registry
- hostnames. When exposing OpenShift's image registry to the public, this also
- lets cluster admins specify the external hostname.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- additionalTrustedCA:
- description: additionalTrustedCA is a reference to a ConfigMap containing
- additional CAs that should be trusted during imagestream import, pod
- image pull, build image pull, and imageregistry pullthrough. The namespace
- for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- allowedRegistriesForImport:
- description: allowedRegistriesForImport limits the container image registries
- that normal users may import images from. Set this list to the registries
- that you trust to contain valid Docker images and that you want applications
- to be able to import from. Users with permission to create Images
- or ImageStreamMappings via the API are not affected by this policy
- - typically only administrators or system integrations will have those
- permissions.
- type: array
- items:
- description: RegistryLocation contains a location of the registry
- specified by the registry domain name. The domain name might include
- wildcards, like '*' or '??'.
- type: object
- properties:
- domainName:
- description: domainName specifies a domain name for the registry.
- If the registry uses a non-standard port (not 80 or 443), the
- port should be included in the domain name as well.
- type: string
- insecure:
- description: insecure indicates whether the registry is secure
- (https) or insecure (http). By default (if not specified), the
- registry is assumed to be secure.
- type: boolean
- externalRegistryHostnames:
- description: externalRegistryHostnames provides the hostnames for the
- default external image registry. The external hostname should be set
- only when the image registry is exposed externally. The first value
- is used in the 'publicDockerImageRepository' field in ImageStreams. The
- value must be in "hostname[:port]" format.
- type: array
- items:
- type: string
- registrySources:
- description: registrySources contains configuration that determines
- how the container runtime should treat individual registries when
- accessing images for builds+pods. (e.g. whether or not to allow insecure
- access). It does not contain configuration for the internal cluster
- registry.
- type: object
- properties:
- allowedRegistries:
- description: "allowedRegistries are whitelisted for image pull/push.
- All other registries are blocked. \n Only one of BlockedRegistries
- or AllowedRegistries may be set."
- type: array
- items:
- type: string
- blockedRegistries:
- description: "blockedRegistries are blacklisted from image pull/push.
- All other registries are allowed. \n Only one of BlockedRegistries
- or AllowedRegistries may be set."
- type: array
- items:
- type: string
- insecureRegistries:
- description: insecureRegistries are registries which do not have
- valid TLS certificates or only support HTTP connections.
- type: array
- items:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- externalRegistryHostnames:
- description: externalRegistryHostnames provides the hostnames for the
- default external image registry. The external hostname should be set
- only when the image registry is exposed externally. The first value
- is used in the 'publicDockerImageRepository' field in ImageStreams. The
- value must be in "hostname[:port]" format.
- type: array
- items:
- type: string
- internalRegistryHostname:
- description: internalRegistryHostname sets the hostname for the default
- internal image registry. The value must be in "hostname[:port]" format.
- This value is set by the image registry operator which controls the
- internal registry hostname. For backward compatibility, users can
- still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this
- setting overrides the environment variable.
- type: string
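An Image CR exercising this schema could look like the following sketch (registry hostnames and the CA config map are illustrative; note that only one of allowedRegistries/blockedRegistries may be set, so only insecureRegistries is shown):

apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
spec:
  additionalTrustedCA:
    name: registry-ca            # hypothetical config map in openshift-config
  allowedRegistriesForImport:
    - domainName: registry.example.com:5000   # non-standard port included, as required
      insecure: false
  registrySources:
    insecureRegistries:
      - insecure.example.com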
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
deleted file mode 100644
index 2aba542da..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
+++ /dev/null
@@ -1,221 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: infrastructures.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Infrastructure
- listKind: InfrastructureList
- plural: infrastructures
- singular: infrastructure
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Infrastructure holds cluster-wide information about Infrastructure. The
- canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- cloudConfig:
- description: cloudConfig is a reference to a ConfigMap containing the
- cloud provider configuration file. This configuration file is used
- to configure the Kubernetes cloud provider integration when using
- the built-in cloud provider integration or the external cloud controller
- manager. The namespace for this config map is openshift-config.
- type: object
- properties:
- key:
- description: Key allows pointing to a specific key/value inside
- of the configmap. This is useful for logical file references.
- type: string
- name:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- apiServerInternalURI:
- description: apiServerInternalURI is a valid URI with scheme (http/https),
- address and port. apiServerInternalURI can be used by components
- like kubelets, to contact the Kubernetes API server using the infrastructure
- provider rather than Kubernetes networking.
- type: string
- apiServerURL:
- description: apiServerURL is a valid URI with scheme (http/https), address
- and port. apiServerURL can be used by components like the web console
- to tell users where to find the Kubernetes API.
- type: string
- etcdDiscoveryDomain:
- description: 'etcdDiscoveryDomain is the domain used to fetch the SRV
- records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery'
- type: string
- infrastructureName:
- description: infrastructureName uniquely identifies a cluster with a
- human friendly name. Once set it should not be changed. Must be of
- max length 27 and must have only alphanumeric or hyphen characters.
- type: string
- platform:
- description: "platform is the underlying infrastructure provider for
- the cluster. \n Deprecated: Use platformStatus.type instead."
- type: string
- platformStatus:
- description: platformStatus holds status information specific to the
- underlying infrastructure provider.
- type: object
- properties:
- aws:
- description: AWS contains settings specific to the Amazon Web Services
- infrastructure provider.
- type: object
- properties:
- region:
- description: region holds the default AWS region for new AWS
- resources created by the cluster.
- type: string
- azure:
- description: Azure contains settings specific to the Azure infrastructure
- provider.
- type: object
- properties:
- networkResourceGroupName:
- description: networkResourceGroupName is the Resource Group
- for network resources like the Virtual Network and Subnets
- used by the cluster. If empty, the value is the same as ResourceGroupName.
- type: string
- resourceGroupName:
- description: resourceGroupName is the Resource Group for new
- Azure resources created for the cluster.
- type: string
- baremetal:
- description: BareMetal contains settings specific to the BareMetal
- platform.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for BareMetal deployments. In
- order to minimize necessary changes to the datacenter DNS,
- a DNS service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- gcp:
- description: GCP contains settings specific to the Google Cloud
- Platform infrastructure provider.
- type: object
- properties:
- projectID:
- description: projectID is the Project ID for new GCP
- resources created for the cluster.
- type: string
- region:
- description: region holds the region for new GCP resources created
- for the cluster.
- type: string
- openstack:
- description: OpenStack contains settings specific to the OpenStack
- infrastructure provider.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- cloudName:
- description: cloudName is the name of the desired OpenStack
- cloud in the client configuration file (`clouds.yaml`).
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for OpenStack deployments. In
- order to minimize necessary changes to the datacenter DNS,
- a DNS service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- ovirt:
- description: Ovirt contains settings specific to the oVirt infrastructure
- provider.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for oVirt deployments. In order
- to minimize necessary changes to the datacenter DNS, a DNS
- service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- type:
- description: type is the underlying infrastructure provider for
- the cluster. This value controls whether infrastructure automation
- such as service load balancers, dynamic volume provisioning, machine
- creation and deletion, and other integrations are enabled. If
- None, no infrastructure automation is enabled. Allowed values
- are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack",
- "VSphere", "oVirt", and "None". Individual components may not
- support all platforms, and must handle unrecognized platforms
- as None if they do not support that platform.
- type: string
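Most of this resource is operator-managed status; the only user-settable spec field is cloudConfig. A sketch (the config map name and key are hypothetical):

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  cloudConfig:
    name: cloud-provider-config  # hypothetical config map in openshift-config
    key: config                  # logical file reference inside the config map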
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
deleted file mode 100644
index ada440425..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: ingresses.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Ingress
- listKind: IngressList
- plural: ingresses
- singular: ingress
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Ingress holds cluster-wide information about ingress, including
- the default ingress domain used for routes. The canonical name is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- domain:
- description: "domain is used to generate a default host name for a route
- when the route's host name is empty. The generated host name will
- follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
- \n It is also used as the default wildcard domain suffix for ingress.
- The default ingresscontroller domain will follow this pattern: \"*.<domain>\".
- \n Once set, changing domain is not currently supported."
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
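A minimal Ingress CR per this schema (the domain is illustrative; route hosts then default to <route-name>.<route-namespace>.apps.openshift.example.com):

apiVersion: config.openshift.io/v1
kind: Ingress
metadata:
  name: cluster
spec:
  domain: apps.openshift.example.com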
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
deleted file mode 100644
index bc3b62a87..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: networks.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Network
- listKind: NetworkList
- plural: networks
- singular: network
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: 'Network holds cluster-wide information about Network. The canonical
- name is `cluster`. It is used to configure the desired network configuration,
- such as: IP address pools for services/pod IPs, network plugin, etc. Please
- view network.spec for an explanation on what applies when configuring this
- resource.'
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration. As a general
- rule, this SHOULD NOT be read directly. Instead, you should consume the
- NetworkStatus, as it indicates the currently deployed configuration. Currently,
- most spec fields are immutable after installation. Please view the individual
- ones for further details on each.
- type: object
- properties:
- clusterNetwork:
- description: IP address pool to use for pod IPs. This field is immutable
- after installation.
- type: array
- items:
- description: ClusterNetworkEntry is a contiguous block of IP addresses
- from which pod IPs are allocated.
- type: object
- properties:
- cidr:
- description: The complete block for pod IPs.
- type: string
- hostPrefix:
- description: The size (prefix) of block to allocate to each node.
- type: integer
- format: int32
- minimum: 0
- externalIP:
- description: externalIP defines configuration for controllers that affect
- Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.
- type: object
- properties:
- autoAssignCIDRs:
- description: autoAssignCIDRs is a list of CIDRs from which to automatically
- assign Service.ExternalIP. These are assigned when the service
- is of type LoadBalancer. In general, this is only useful for bare-metal
- clusters. In OpenShift 3.x, this was misleadingly called "IngressIPs".
- Automatically assigned External IPs are not affected by any ExternalIPPolicy
- rules. Currently, only one entry may be provided.
- type: array
- items:
- type: string
- policy:
- description: policy is a set of restrictions applied to the ExternalIP
- field. If nil or empty, then ExternalIP is not allowed to be set.
- type: object
- properties:
- allowedCIDRs:
- description: allowedCIDRs is the list of allowed CIDRs.
- type: array
- items:
- type: string
- rejectedCIDRs:
- description: rejectedCIDRs is the list of disallowed CIDRs.
- These take precedence over allowedCIDRs.
- type: array
- items:
- type: string
- networkType:
- description: 'NetworkType is the plugin that is to be deployed (e.g.
- OpenShiftSDN). This should match a value that the cluster-network-operator
- understands, or else no networking will be installed. Currently supported
- values are: - OpenShiftSDN This field is immutable after installation.'
- type: string
- serviceNetwork:
- description: IP address pool for services. Currently, we only support
- a single entry here. This field is immutable after installation.
- type: array
- items:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- clusterNetwork:
- description: IP address pool to use for pod IPs.
- type: array
- items:
- description: ClusterNetworkEntry is a contiguous block of IP addresses
- from which pod IPs are allocated.
- type: object
- properties:
- cidr:
- description: The complete block for pod IPs.
- type: string
- hostPrefix:
- description: The size (prefix) of block to allocate to each node.
- type: integer
- format: int32
- minimum: 0
- clusterNetworkMTU:
- description: ClusterNetworkMTU is the MTU for inter-pod networking.
- type: integer
- networkType:
- description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
- type: string
- serviceNetwork:
- description: IP address pool for services. Currently, we only support
- a single entry here.
- type: array
- items:
- type: string
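A Network CR matching this schema might look like the sketch below (CIDR values are illustrative; per the spec description, most fields are immutable after installation and consumers should read status instead):

apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  clusterNetwork:
    - cidr: 10.128.0.0/14        # pod IP pool
      hostPrefix: 23             # /23 block allocated per node
  serviceNetwork:
    - 172.30.0.0/16              # single entry, as currently supported
  networkType: OpenShiftSDN      # must be a plugin the cluster-network-operator understands
  externalIP:
    policy:
      allowedCIDRs:
        - 192.0.2.0/24           # illustrative allowed range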
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
deleted file mode 100644
index fd763d047..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
+++ /dev/null
@@ -1,661 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: oauths.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: OAuth
- listKind: OAuthList
- plural: oauths
- singular: oauth
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: OAuth holds cluster-wide information about OAuth. The canonical
- name is `cluster`. It is used to configure the integrated OAuth server. This
- configuration is only honored when the top level Authentication config has
- type set to IntegratedOAuth.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: OAuthSpec contains desired cluster auth configuration
- type: object
- properties:
- identityProviders:
- description: identityProviders is an ordered list of ways for a user
- to identify themselves. When this list is empty, no identities are
- provisioned for users.
- type: array
- items:
- description: IdentityProvider provides identities for users authenticating
- using credentials
- type: object
- properties:
- basicAuth:
- description: basicAuth contains configuration options for the
- BasicAuth IdP
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- tlsClientCert:
- description: tlsClientCert is an optional reference to a secret
- by name that contains the PEM-encoded TLS client certificate
- to present when connecting to the server. The key "tls.crt"
- is used to locate the data. If specified and the secret
- or expected key is not found, the identity provider is not
- honored. If the specified certificate data is not valid,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsClientKey:
- description: tlsClientKey is an optional reference to a secret
- by name that contains the PEM-encoded TLS private key for
- the client certificate referenced in tlsClientCert. The
- key "tls.key" is used to locate the data. If specified and
- the secret or expected key is not found, the identity provider
- is not honored. If the specified certificate data is not
- valid, the identity provider is not honored. The namespace
- for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the remote URL to connect to
- type: string
- github:
- description: github enables user authentication using GitHub credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- This can only be configured when hostname is set to a non-empty
- value. The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- hostname:
- description: hostname is the optional domain (e.g. "mycompany.com")
- for use with a hosted instance of GitHub Enterprise. It
- must match the GitHub Enterprise settings value configured
- at /setup/settings#hostname.
- type: string
- organizations:
- description: organizations optionally restricts which organizations
- are allowed to log in
- type: array
- items:
- type: string
- teams:
- description: teams optionally restricts which teams are allowed
- to log in. Format is <org>/<team>.
- type: array
- items:
- type: string
- gitlab:
- description: gitlab enables user authentication using GitLab credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the oauth server base URL
- type: string
- google:
- description: google enables user authentication using Google credentials
- type: object
- properties:
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- hostedDomain:
- description: hostedDomain is the optional Google App domain
- (e.g. "mycompany.com") to restrict logins to
- type: string
- htpasswd:
- description: htpasswd enables user authentication using an HTPasswd
- file to validate credentials
- type: object
- properties:
- fileData:
- description: fileData is a required reference to a secret
- by name containing the data to use as the htpasswd file.
- The key "htpasswd" is used to locate the data. If the secret
- or expected key is not found, the identity provider is not
- honored. If the specified htpasswd data is not valid, the
- identity provider is not honored. The namespace for this
- secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- keystone:
- description: keystone enables user authentication using keystone
- password credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- domainName:
- description: domainName is required for keystone v3
- type: string
- tlsClientCert:
- description: tlsClientCert is an optional reference to a secret
- by name that contains the PEM-encoded TLS client certificate
- to present when connecting to the server. The key "tls.crt"
- is used to locate the data. If specified and the secret
- or expected key is not found, the identity provider is not
- honored. If the specified certificate data is not valid,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsClientKey:
- description: tlsClientKey is an optional reference to a secret
- by name that contains the PEM-encoded TLS private key for
- the client certificate referenced in tlsClientCert. The
- key "tls.key" is used to locate the data. If specified and
- the secret or expected key is not found, the identity provider
- is not honored. If the specified certificate data is not
- valid, the identity provider is not honored. The namespace
- for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the remote URL to connect to
- type: string
- ldap:
- description: ldap enables user authentication using LDAP credentials
- type: object
- properties:
- attributes:
- description: attributes maps LDAP attributes to identities
- type: object
- properties:
- email:
- description: email is the list of attributes whose values
- should be used as the email address. Optional. If unspecified,
- no email is set for the identity
- type: array
- items:
- type: string
- id:
- description: id is the list of attributes whose values
- should be used as the user ID. Required. First non-empty
- attribute is used. At least one attribute is required.
- If none of the listed attributes have a value, authentication
- fails. LDAP standard identity attribute is "dn"
- type: array
- items:
- type: string
- name:
- description: name is the list of attributes whose values
- should be used as the display name. Optional. If unspecified,
- no display name is set for the identity. LDAP standard
- display name attribute is "cn"
- type: array
- items:
- type: string
- preferredUsername:
- description: preferredUsername is the list of attributes
- whose values should be used as the preferred username.
- LDAP standard login attribute is "uid"
- type: array
- items:
- type: string
- bindDN:
- description: bindDN is an optional DN to bind with during
- the search phase.
- type: string
- bindPassword:
- description: bindPassword is an optional reference to a secret
- by name containing a password to bind with during the search
- phase. The key "bindPassword" is used to locate the data.
- If specified and the secret or expected key is not found,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- insecure:
- description: 'insecure, if true, indicates the connection
- should not use TLS. WARNING: Should not be set to `true`
- with the URL scheme "ldaps://" as "ldaps://" URLs always attempt
- to connect using TLS, even when `insecure` is set to `true`.
- When `true`, "ldap://" URLs connect insecurely. When `false`,
- "ldap://" URLs are upgraded to a TLS connection using StartTLS
- as specified in https://tools.ietf.org/html/rfc2830.'
- type: boolean
- url:
- description: 'url is an RFC 2255 URL which specifies the LDAP
- search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter'
- type: string
- mappingMethod:
- description: mappingMethod determines how identities from this
- provider are mapped to users. Defaults to "claim"
- type: string
- name:
- description: 'name is used to qualify the identities returned
- by this provider. - It MUST be unique and not shared by any
- other identity provider used - It MUST be a valid path segment:
- name cannot equal "." or ".." or contain "/" or "%" or ":" Ref:
- https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
- type: string
- openID:
- description: openID enables user authentication using OpenID credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- claims:
- description: claims mappings
- type: object
- properties:
- email:
- description: email is the list of claims whose values
- should be used as the email address. Optional. If unspecified,
- no email is set for the identity
- type: array
- items:
- type: string
- name:
- description: name is the list of claims whose values should
- be used as the display name. Optional. If unspecified,
- no display name is set for the identity
- type: array
- items:
- type: string
- preferredUsername:
- description: preferredUsername is the list of claims whose
- values should be used as the preferred username. If
- unspecified, the preferred username is determined from
- the value of the sub claim
- type: array
- items:
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- extraAuthorizeParameters:
- description: extraAuthorizeParameters are any custom parameters
- to add to the authorize request.
- type: object
- additionalProperties:
- type: string
- extraScopes:
- description: extraScopes are any scopes to request in addition
- to the standard "openid" scope.
- type: array
- items:
- type: string
- issuer:
- description: issuer is the URL that the OpenID Provider asserts
- as its Issuer Identifier. It must use the https scheme with
- no query or fragment component.
- type: string
- requestHeader:
- description: requestHeader enables user authentication using request
- header credentials
- type: object
- properties:
- ca:
- description: ca is a required reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. Specifically, it allows verification
- of incoming requests to prevent header spoofing. The key
- "ca.crt" is used to locate the data. If the config map or
- expected key is not found, the identity provider is not
- honored. If the specified ca data is not valid, the identity
- provider is not honored. The namespace for this config map
- is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- challengeURL:
- description: challengeURL is a URL to redirect unauthenticated
- /authorize requests to. Unauthenticated requests from OAuth
- clients which expect WWW-Authenticate challenges will be
- redirected here. ${url} is replaced with the current URL,
- escaped to be safe in a query parameter (e.g. https://www.example.com/sso-login?then=${url}).
- ${query} is replaced with the current query string (e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}).
- Required when challenge is set to true.
- type: string
- clientCommonNames:
- description: clientCommonNames is an optional list of common
- names to require a match from. If empty, any client certificate
- validated against the clientCA bundle is considered authoritative.
- type: array
- items:
- type: string
- emailHeaders:
- description: emailHeaders is the set of headers to check for
- the email address
- type: array
- items:
- type: string
- headers:
- description: headers is the set of headers to check for identity
- information
- type: array
- items:
- type: string
- loginURL:
- description: loginURL is a URL to redirect unauthenticated
- /authorize requests to. Unauthenticated requests from OAuth
- clients which expect interactive logins will be redirected
- here. ${url} is replaced with the current URL, escaped to
- be safe in a query parameter (e.g. https://www.example.com/sso-login?then=${url}).
- ${query} is replaced with the current query string (e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}).
- Required when login is set to true.
- type: string
- nameHeaders:
- description: nameHeaders is the set of headers to check for
- the display name
- type: array
- items:
- type: string
- preferredUsernameHeaders:
- description: preferredUsernameHeaders is the set of headers
- to check for the preferred username
- type: array
- items:
- type: string
- type:
- description: type identifies the identity provider type for this
- entry.
- type: string
- templates:
- description: templates allow you to customize pages like the login page.
- type: object
- properties:
- error:
- description: error is the name of a secret that specifies a go template
- to use to render error pages during the authentication or grant
- flow. The key "errors.html" is used to locate the template data.
- If specified and the secret or expected key is not found, the
- default error page is used. If the specified template is not valid,
- the default error page is used. If unspecified, the default error
- page is used. The namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- login:
- description: login is the name of a secret that specifies a go template
- to use to render the login page. The key "login.html" is used
- to locate the template data. If specified and the secret or expected
- key is not found, the default login page is used. If the specified
- template is not valid, the default login page is used. If unspecified,
- the default login page is used. The namespace for this secret
- is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- providerSelection:
- description: providerSelection is the name of a secret that specifies
- a go template to use to render the provider selection page. The
- key "providers.html" is used to locate the template data. If specified
- and the secret or expected key is not found, the default provider
- selection page is used. If the specified template is not valid,
- the default provider selection page is used. If unspecified, the
- default provider selection page is used. The namespace for this
- secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- tokenConfig:
- description: tokenConfig contains options for authorization and access
- tokens
- type: object
- properties:
- accessTokenInactivityTimeoutSeconds:
- description: 'accessTokenInactivityTimeoutSeconds defines the default
- token inactivity timeout for tokens granted by any client. The
- value represents the maximum amount of time that can occur between
- consecutive uses of the token. Tokens become invalid if they are
- not used within this temporal window. The user will need to acquire
- a new token to regain access once a token times out. Valid integer
- values are: x < 0 (e.g. `-1`): the timeout is enabled, but tokens
- never time out unless configured per client; x = 0: the timeout
- is disabled (default); x > 0: tokens time out if there is no
- activity for x seconds. The current minimum allowed value for
- x is 300 (5 minutes).'
- type: integer
- format: int32
- accessTokenMaxAgeSeconds:
- description: accessTokenMaxAgeSeconds defines the maximum age of
- access tokens
- type: integer
- format: int32
- status:
- description: OAuthStatus shows current known state of OAuth server in the
- cluster
- type: object
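The schema above is easier to grasp from a concrete object. Below is a minimal sketch of the canonical `cluster` OAuth resource built with this package's Go types, assuming the identity-provider types from types_oauth.go (not part of this diff); the secret name is a placeholder. It wires a single HTPasswd provider whose file lives under the "htpasswd" key of a secret in openshift-config, exactly as the field descriptions require.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// clusterOAuth builds the canonical `cluster` OAuth object with one
// HTPasswd identity provider, mirroring the schema validated above.
func clusterOAuth() *configv1.OAuth {
	return &configv1.OAuth{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.OAuthSpec{
			IdentityProviders: []configv1.IdentityProvider{{
				Name:          "local-users",
				MappingMethod: "claim", // the documented default
				IdentityProviderConfig: configv1.IdentityProviderConfig{
					Type: configv1.IdentityProviderTypeHTPasswd,
					HTPasswd: &configv1.HTPasswdIdentityProvider{
						// "htpasswd-secret" is a placeholder; the key
						// "htpasswd" inside it holds the file data.
						FileData: configv1.SecretNameReference{Name: "htpasswd-secret"},
					},
				},
			}},
		},
	}
}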
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
deleted file mode 100644
index a625aa617..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: projects.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- names:
- kind: Project
- listKind: ProjectList
- plural: projects
- singular: project
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Project holds cluster-wide information about Project. The canonical
- name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- projectRequestMessage:
- description: projectRequestMessage is the string presented to a user
- if they are unable to request a project via the projectrequest api
- endpoint
- type: string
- projectRequestTemplate:
- description: projectRequestTemplate is the template to use for creating
- projects in response to projectrequest. This must point to a template
- in 'openshift-config' namespace. It is optional. If it is not specified,
- a default template is used.
- type: object
- properties:
- name:
- description: name is the metadata.name of the referenced project
- request template
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
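For contrast with the schema, a sketch of the `cluster` Project object built from this package's Go types; ProjectSpec and its TemplateReference live in types_project.go (not part of this diff), and the message and template name are placeholders.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// clusterProject customizes self-service project creation: a message for
// users who cannot request projects, and a template in openshift-config.
func clusterProject() *configv1.Project {
	return &configv1.Project{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.ProjectSpec{
			ProjectRequestMessage:  "Ask #platform-team to create a project for you.",
			ProjectRequestTemplate: configv1.TemplateReference{Name: "project-request"},
		},
	}
}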
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
deleted file mode 100644
index 6f5336c8f..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: schedulers.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Scheduler
- singular: scheduler
- plural: schedulers
- listKind: SchedulerList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Scheduler holds cluster-wide config information to run the Kubernetes
- Scheduler and influence its placement decisions. The canonical name for this
- config is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- defaultNodeSelector:
- description: 'defaultNodeSelector helps set the cluster-wide default
- node selector to restrict pod placement to specific nodes. This is
- applied to the pods created in all namespaces without a specified
- nodeSelector value. For example, defaultNodeSelector: "type=user-node,region=east"
- would set nodeSelector field in pod spec to "type=user-node,region=east"
- to all pods created in all namespaces. Namespaces having project-wide
- node selectors won''t be impacted even if this field is set. This
- adds an annotation section to the namespace. For example, if a new
- namespace is created with node-selector=''type=user-node,region=east'',
- the annotation openshift.io/node-selector: type=user-node,region=east
- gets added to the project. When the openshift.io/node-selector annotation
- is set on the project the value is used in preference to the value
- we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector:
- "type=user-node,region=west" means that the default of "type=user-node,region=east"
- set in defaultNodeSelector would not be applied.'
- type: string
- mastersSchedulable:
- description: 'MastersSchedulable allows master nodes to be schedulable.
- When this flag is turned on, all the master nodes in the cluster will
- be made schedulable, so that workload pods can run on them. The default
- value for this field is false, meaning none of the master nodes are
- schedulable. Important Note: Once the workload pods start running
- on the master nodes, extreme care must be taken to ensure that cluster-critical
- control plane components are not impacted. Please turn on this field
- after doing due diligence.'
- type: boolean
- policy:
- description: policy is a reference to a ConfigMap containing scheduler
- policy which has user-specified predicates and priorities. If this
- ConfigMap is not available, the scheduler will default to using DefaultAlgorithmProvider.
- The namespace for this configmap is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
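The same pattern for the Scheduler CRD: a sketch of the `cluster` object using the Go types from types_scheduler.go (not part of this diff); the selector and policy ConfigMap name are placeholders.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// clusterScheduler restricts default pod placement to user nodes and keeps
// control-plane nodes unschedulable, matching the field docs above.
func clusterScheduler() *configv1.Scheduler {
	return &configv1.Scheduler{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.SchedulerSpec{
			DefaultNodeSelector: "type=user-node,region=east",
			MastersSchedulable:  false,
			Policy:              configv1.ConfigMapNameReference{Name: "scheduler-policy"},
		},
	}
}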
diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go
deleted file mode 100644
index 4ff5208f2..000000000
--- a/vendor/github.com/openshift/api/config/v1/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +k8s:deepcopy-gen=package,register
-// +k8s:defaulter-gen=TypeMeta
-// +k8s:openapi-gen=true
-
-// +kubebuilder:validation:Optional
-// +groupName=config.openshift.io
-// Package v1 is the v1 version of the API.
-package v1
diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go
deleted file mode 100644
index 35eace370..000000000
--- a/vendor/github.com/openshift/api/config/v1/register.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var (
- GroupName = "config.openshift.io"
- GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
- schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- // Install is a function which adds this version to a scheme
- Install = schemeBuilder.AddToScheme
-
- // SchemeGroupVersion generated code relies on this name
- // Deprecated
- SchemeGroupVersion = GroupVersion
- // AddToScheme exists solely to keep the old generators creating valid code
- // DEPRECATED
- AddToScheme = schemeBuilder.AddToScheme
-)
-
-// Resource generated code relies on this being here, but it logically belongs to the group
-// DEPRECATED
-func Resource(resource string) schema.GroupResource {
- return schema.GroupResource{Group: GroupName, Resource: resource}
-}
-
-// Adds the list of known types to api.Scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(GroupVersion,
- &APIServer{},
- &APIServerList{},
- &Authentication{},
- &AuthenticationList{},
- &Build{},
- &BuildList{},
- &ClusterOperator{},
- &ClusterOperatorList{},
- &ClusterVersion{},
- &ClusterVersionList{},
- &Console{},
- &ConsoleList{},
- &DNS{},
- &DNSList{},
- &FeatureGate{},
- &FeatureGateList{},
- &Image{},
- &ImageList{},
- &Infrastructure{},
- &InfrastructureList{},
- &Ingress{},
- &IngressList{},
- &Network{},
- &NetworkList{},
- &OAuth{},
- &OAuthList{},
- &OperatorHub{},
- &OperatorHubList{},
- &Project{},
- &ProjectList{},
- &Proxy{},
- &ProxyList{},
- &Scheduler{},
- &SchedulerList{},
- )
- metav1.AddToGroupVersion(scheme, GroupVersion)
- return nil
-}
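The Install variable above is the only entry point most consumers need. A short usage sketch: register all config.openshift.io/v1 types with a runtime.Scheme so a client can encode and decode them (utilruntime.Must simply panics on a registration error).

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	configv1 "github.com/openshift/api/config/v1"
)

// newConfigScheme returns a scheme that knows every type registered by
// addKnownTypes above, including the group-version metadata.
func newConfigScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	utilruntime.Must(configv1.Install(scheme))
	return scheme
}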
diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go
deleted file mode 100644
index 6a5718c1d..000000000
--- a/vendor/github.com/openshift/api/config/v1/stringsource.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package v1
-
-import "encoding/json"
-
- // UnmarshalJSON implements the json.Unmarshaler interface.
-// If the value is a string, it sets the Value field of the StringSource.
-// Otherwise, it is unmarshaled into the StringSourceSpec struct
-func (s *StringSource) UnmarshalJSON(value []byte) error {
- // If we can unmarshal to a simple string, just set the value
- var simpleValue string
- if err := json.Unmarshal(value, &simpleValue); err == nil {
- s.Value = simpleValue
- return nil
- }
-
- // Otherwise do the full struct unmarshal
- return json.Unmarshal(value, &s.StringSourceSpec)
-}
-
- // MarshalJSON implements the json.Marshaler interface.
-// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
-// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
-func (s *StringSource) MarshalJSON() ([]byte, error) {
- // If we have only a cleartext value set, do a simple string marshal
- if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
- return json.Marshal(s.Value)
- }
-
- // Otherwise do the full struct marshal of the externalized bits
- return json.Marshal(s.StringSourceSpec)
-}
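The asymmetric marshaling above is easiest to see in a round trip. This runnable sketch feeds both accepted encodings into StringSource: a bare JSON string lands in Value, a JSON object fills the externalized fields, and a value-only source marshals back to a plain string (note the pointer in json.Marshal, since both methods have pointer receivers).

package main

import (
	"encoding/json"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	var inline, external configv1.StringSource

	// A bare JSON string sets only Value.
	_ = json.Unmarshal([]byte(`"hunter2"`), &inline)
	fmt.Println(inline.Value) // hunter2

	// A JSON object fills the full StringSourceSpec.
	_ = json.Unmarshal([]byte(`{"file":"/etc/secret","keyFile":"/etc/key"}`), &external)
	fmt.Println(external.File, external.KeyFile) // /etc/secret /etc/key

	// Value-only sources marshal back to a simple JSON string.
	out, _ := json.Marshal(&inline)
	fmt.Println(string(out)) // "hunter2"
}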
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
deleted file mode 100644
index 142748423..000000000
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// ConfigMapFileReference references a config map in a specific namespace.
-// The namespace must be specified at the point of use.
-type ConfigMapFileReference struct {
- Name string `json:"name"`
- // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
- Key string `json:"key,omitempty"`
-}
-
-// ConfigMapNameReference references a config map in a specific namespace.
-// The namespace must be specified at the point of use.
-type ConfigMapNameReference struct {
- // name is the metadata.name of the referenced config map
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-// SecretNameReference references a secret in a specific namespace.
-// The namespace must be specified at the point of use.
-type SecretNameReference struct {
- // name is the metadata.name of the referenced secret
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-// HTTPServingInfo holds configuration for serving HTTP
-type HTTPServingInfo struct {
- // ServingInfo is the HTTP serving information
- ServingInfo `json:",inline"`
- // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
- MaxRequestsInFlight int64 `json:"maxRequestsInFlight"`
- // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if
- // -1, there is no limit on requests.
- RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"`
-}
-
-// ServingInfo holds information about serving web pages
-type ServingInfo struct {
- // BindAddress is the ip:port to serve on
- BindAddress string `json:"bindAddress"`
- // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
- // "tcp4", and "tcp6"
- BindNetwork string `json:"bindNetwork"`
- // CertInfo is the TLS cert info for serving secure traffic.
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
- // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
- // +optional
- ClientCA string `json:"clientCA,omitempty"`
- // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
- NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"`
- // MinTLSVersion is the minimum TLS version supported.
- // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
- MinTLSVersion string `json:"minTLSVersion,omitempty"`
- // CipherSuites contains an overridden list of ciphers for the server to support.
- // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
- CipherSuites []string `json:"cipherSuites,omitempty"`
-}
-
-// CertInfo relates a certificate with a private key
-type CertInfo struct {
- // CertFile is a file containing a PEM-encoded certificate
- CertFile string `json:"certFile"`
- // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
- KeyFile string `json:"keyFile"`
-}
-
-// NamedCertificate specifies a certificate/key, and the names it should be served for
-type NamedCertificate struct {
- // Names is a list of DNS names this certificate should be used to secure
- // A name can be a normal DNS name, or can contain leading wildcard segments.
- Names []string `json:"names,omitempty"`
- // CertInfo is the TLS cert info for serving secure traffic
- CertInfo `json:",inline"`
-}
-
-// LeaderElection provides information to elect a leader
-type LeaderElection struct {
- // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case.
- Disable bool `json:"disable,omitempty"`
- // namespace indicates which namespace the resource is in
- Namespace string `json:"namespace,omitempty"`
- // name indicates what name to use for the resource
- Name string `json:"name,omitempty"`
-
- // leaseDuration is the duration that non-leader candidates will wait
- // after observing a leadership renewal until attempting to acquire
- // leadership of a led but unrenewed leader slot. This is effectively the
- // maximum duration that a leader can be stopped before it is replaced
- // by another candidate. This is only applicable if leader election is
- // enabled.
- // +nullable
- LeaseDuration metav1.Duration `json:"leaseDuration"`
- // renewDeadline is the interval between attempts by the acting master to
- // renew a leadership slot before it stops leading. This must be less
- // than or equal to the lease duration. This is only applicable if leader
- // election is enabled.
- // +nullable
- RenewDeadline metav1.Duration `json:"renewDeadline"`
- // retryPeriod is the duration the clients should wait between attempting
- // acquisition and renewal of a leadership. This is only applicable if
- // leader election is enabled.
- // +nullable
- RetryPeriod metav1.Duration `json:"retryPeriod"`
-}
-
-// StringSource allows specifying a string inline, or externally via env var or file.
-// When it contains only a string value, it marshals to a simple JSON string.
-type StringSource struct {
- // StringSourceSpec specifies the string value, or external location
- StringSourceSpec `json:",inline"`
-}
-
-// StringSourceSpec specifies a string value, or external location
-type StringSourceSpec struct {
- // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
- Value string `json:"value"`
-
- // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
- Env string `json:"env"`
-
- // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
- File string `json:"file"`
-
- // KeyFile references a file containing the key to use to decrypt the value.
- KeyFile string `json:"keyFile"`
-}
-
-// RemoteConnectionInfo holds information necessary for establishing a remote connection
-type RemoteConnectionInfo struct {
- // URL is the remote URL to connect to
- URL string `json:"url"`
- // CA is the CA for verifying TLS connections
- CA string `json:"ca"`
- // CertInfo is the TLS client cert information to present
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
-}
-
-type AdmissionConfig struct {
- PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"`
-
- // enabledPlugins is a list of admission plugins that must be on in addition to the default list.
- // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon
- // and can result in performance penalties and unexpected behavior.
- EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"`
-
- // disabledPlugins is a list of admission plugins that must be off. Putting something in this list
- // is almost always a mistake and likely to result in cluster instability.
- DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
-}
-
-// AdmissionPluginConfig holds the necessary configuration options for admission plugins
-type AdmissionPluginConfig struct {
- // Location is the path to a configuration file that contains the plugin's
- // configuration
- Location string `json:"location"`
-
- // Configuration is an embedded configuration object to be used as the plugin's
- // configuration. If present, it will be used instead of the path to the configuration file.
- // +nullable
- // +kubebuilder:pruning:PreserveUnknownFields
- Configuration runtime.RawExtension `json:"configuration"`
-}
-
-type LogFormatType string
-
-type WebHookModeType string
-
-const (
- // LogFormatLegacy saves events in a 1-line text format.
- LogFormatLegacy LogFormatType = "legacy"
- // LogFormatJson saves events in structured JSON format.
- LogFormatJson LogFormatType = "json"
-
- // WebHookModeBatch indicates that the webhook should buffer audit events
- // internally, sending batch updates either once a certain number of
- // events have been received or a certain amount of time has passed.
- WebHookModeBatch WebHookModeType = "batch"
- // WebHookModeBlocking causes the webhook to block on every attempt to process
- // a set of events. This causes requests to the API server to wait for a
- // round trip to the external audit service before sending a response.
- WebHookModeBlocking WebHookModeType = "blocking"
-)
-
-// AuditConfig holds configuration for the audit capabilities
-type AuditConfig struct {
- // If this flag is set, the audit log will be printed in the logs.
- // The logs contain the method, user, and requested URL.
- Enabled bool `json:"enabled"`
- // All requests coming to the apiserver will be logged to this file.
- AuditFilePath string `json:"auditFilePath"`
- // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
- MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
- // Maximum number of old log files to retain.
- MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
- // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
- MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
-
- // PolicyFile is a path to the file that defines the audit policy configuration.
- PolicyFile string `json:"policyFile"`
- // PolicyConfiguration is an embedded policy configuration object to be used
- // as the audit policy configuration. If present, it will be used instead of
- // the path to the policy file.
- // +nullable
- // +kubebuilder:pruning:PreserveUnknownFields
- PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
-
- // Format of saved audits (legacy or json).
- LogFormat LogFormatType `json:"logFormat"`
-
- // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
- WebHookKubeConfig string `json:"webHookKubeConfig"`
- // Strategy for sending audit events (block or batch).
- WebHookMode WebHookModeType `json:"webHookMode"`
-}
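For reference, here is how the fields above combine in practice: a hedged sketch (the file paths are placeholders) enabling structured JSON audit logs with rotation and batched webhook delivery.

package example

import configv1 "github.com/openshift/api/config/v1"

// sampleAuditConfig wires the AuditConfig knobs defined above:
// JSON log format, 100MB rotation, and a batching audit webhook.
func sampleAuditConfig() configv1.AuditConfig {
	return configv1.AuditConfig{
		Enabled:                  true,
		AuditFilePath:            "/var/log/apiserver/audit.log", // placeholder path
		MaximumFileRetentionDays: 10,
		MaximumRetainedFiles:     5,
		MaximumFileSizeMegabytes: 100, // the documented default
		LogFormat:                configv1.LogFormatJson,
		WebHookKubeConfig:        "/etc/audit/webhook.kubeconfig", // placeholder path
		WebHookMode:              configv1.WebHookModeBatch,
	}
}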
-
-// EtcdConnectionInfo holds information necessary for connecting to an etcd server
-type EtcdConnectionInfo struct {
- // URLs are the URLs for etcd
- URLs []string `json:"urls,omitempty"`
- // CA is a file containing trusted roots for the etcd server certificates
- CA string `json:"ca"`
- // CertInfo is the TLS client cert information for securing communication to etcd
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
-}
-
-type EtcdStorageConfig struct {
- EtcdConnectionInfo `json:",inline"`
-
- // StoragePrefix is the path within etcd that the OpenShift resources will
- // be rooted under. This value, if changed, will mean existing objects in etcd will
- // no longer be located.
- StoragePrefix string `json:"storagePrefix"`
-}
-
-// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
-type GenericAPIServerConfig struct {
- // servingInfo describes how to start serving
- ServingInfo HTTPServingInfo `json:"servingInfo"`
-
- // corsAllowedOrigins
- CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
-
- // auditConfig describes how to configure audit information
- AuditConfig AuditConfig `json:"auditConfig"`
-
- // storageConfig contains information about how to use
- StorageConfig EtcdStorageConfig `json:"storageConfig"`
-
- // admissionConfig holds information about how to configure admission.
- AdmissionConfig AdmissionConfig `json:"admission"`
-
- KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
-}
-
-type KubeClientConfig struct {
- // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config
- KubeConfig string `json:"kubeConfig"`
-
- // connectionOverrides specifies client overrides for system components to loop back to this master.
- ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
-}
-
-type ClientConnectionOverrides struct {
- // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
- // default value of 'application/json'. This field will control all connections to the server used by a particular
- // client.
- AcceptContentTypes string `json:"acceptContentTypes"`
- // contentType is the content type used when sending data to the server from this client.
- ContentType string `json:"contentType"`
-
- // qps controls the number of queries per second allowed for this connection.
- QPS float32 `json:"qps"`
- // burst allows extra queries to accumulate when a client is exceeding its rate.
- Burst int32 `json:"burst"`
-}
-
-// GenericControllerConfig provides information to configure a controller
-type GenericControllerConfig struct {
- // ServingInfo is the HTTP serving information for the controller's endpoints
- ServingInfo HTTPServingInfo `json:"servingInfo"`
-
- // leaderElection provides information to elect a leader. Only override this if you have a specific need
- LeaderElection LeaderElection `json:"leaderElection"`
-
- // authentication allows configuration of authentication for the endpoints
- Authentication DelegatedAuthentication `json:"authentication"`
- // authorization allows configuration of authentication for the endpoints
- Authorization DelegatedAuthorization `json:"authorization"`
-}
-
-// DelegatedAuthentication allows authentication to be disabled.
-type DelegatedAuthentication struct {
- // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
- Disabled bool `json:"disabled,omitempty"`
-}
-
-// DelegatedAuthorization allows authorization to be disabled.
-type DelegatedAuthorization struct {
- // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
- Disabled bool `json:"disabled,omitempty"`
-}
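One subtlety in the types above is that the three LeaderElection durations are metav1.Duration values, not plain time.Duration. A hedged sketch (the namespace, name, and timings are illustrative only) showing the conventional relationship renewDeadline < leaseDuration:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// sampleLeaderElection fills the LeaderElection struct defined above,
// wrapping each timing in metav1.Duration as the field types require.
func sampleLeaderElection() configv1.LeaderElection {
	return configv1.LeaderElection{
		Namespace:     "openshift-config-managed",
		Name:          "my-controller-lock",
		LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
		RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
		RetryPeriod:   metav1.Duration{Duration: 26 * time.Second},
	}
}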
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
deleted file mode 100644
index b347bd80e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// APIServer holds configuration (like serving certificates, client CA and CORS domains)
-// shared by all API servers in the system, among them especially kube-apiserver
-// and openshift-apiserver. The canonical name of an instance is 'cluster'.
-type APIServer struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
- // +kubebuilder:validation:Required
- // +required
- Spec APIServerSpec `json:"spec"`
- // +optional
- Status APIServerStatus `json:"status"`
-}
-
-type APIServerSpec struct {
- // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates
- // will be used for serving secure traffic.
- // +optional
- ServingCerts APIServerServingCerts `json:"servingCerts"`
- // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for
- // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid.
- // You usually only have to set this if you have your own PKI you wish to honor client certificates from.
- // The ConfigMap must exist in the openshift-config namespace and contain the following required fields:
- // - ConfigMap.Data["ca-bundle.crt"] - CA bundle.
- // +optional
- ClientCA ConfigMapNameReference `json:"clientCA"`
- // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the
- // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth
- // server from JavaScript applications.
- // The values are regular expressions that correspond to the Golang regular expression language.
- // +optional
- AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
- // encryption allows the configuration of encryption of resources at the datastore layer.
- // +optional
- Encryption APIServerEncryption `json:"encryption"`
- // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
- //
- // If unset, a default (which may change between releases) is chosen. Note that only Old and
- // Intermediate profiles are currently supported, and the maximum available MinTLSVersions
- // is VersionTLS12.
- // +optional
- TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
-}
-
-type APIServerServingCerts struct {
- // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
- // If no named certificates are provided, or no named certificates match the server name as understood by a client,
- // the defaultServingCertificate will be used.
- // +optional
- NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
-}
-
-// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
-type APIServerNamedServingCert struct {
- // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
- // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
- // Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
- // +optional
- Names []string `json:"names,omitempty"`
- // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
- // The secret must exist in the openshift-config namespace and contain the following required fields:
- // - Secret.Data["tls.key"] - TLS private key.
- // - Secret.Data["tls.crt"] - TLS certificate.
- ServingCertificate SecretNameReference `json:"servingCertificate"`
-}
-
-type APIServerEncryption struct {
- // type defines what encryption type should be used to encrypt resources at the datastore layer.
- // When this field is unset (i.e. when it is set to the empty string), identity is implied.
- // The behavior of unset can and will change over time. Even if encryption is enabled by default,
- // the meaning of unset may change to a different encryption type based on changes in best practices.
- //
- // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
- // This list of sensitive resources can and will change over time. The current authoritative list is:
- //
- // 1. secrets
- // 2. configmaps
- // 3. routes.route.openshift.io
- // 4. oauthaccesstokens.oauth.openshift.io
- // 5. oauthauthorizetokens.oauth.openshift.io
- //
- // +unionDiscriminator
- // +optional
- Type EncryptionType `json:"type,omitempty"`
-}
-
-// +kubebuilder:validation:Enum="";identity;aescbc
-type EncryptionType string
-
-const (
- // identity refers to a type where no encryption is performed at the datastore layer.
- // Resources are written as-is without encryption.
- EncryptionTypeIdentity EncryptionType = "identity"
-
- // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
- // is used to perform encryption at the datastore layer.
- EncryptionTypeAESCBC EncryptionType = "aescbc"
-)
-
-type APIServerStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type APIServerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
- Items []APIServer `json:"items"`
-}
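A sketch of the `cluster` APIServer object combining the encryption union above with a TLS profile. The TLSSecurityProfile types are assumed from elsewhere in this package (types_tlssecurityprofile.go, not shown in this diff), so treat the profile literal as illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// clusterAPIServer enables AES-CBC encryption at the datastore layer and
// pins externally exposed servers to the Intermediate TLS profile.
func clusterAPIServer() *configv1.APIServer {
	return &configv1.APIServer{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.APIServerSpec{
			Encryption: configv1.APIServerEncryption{
				Type: configv1.EncryptionTypeAESCBC,
			},
			TLSSecurityProfile: &configv1.TLSSecurityProfile{
				Type:         configv1.TLSProfileIntermediateType,
				Intermediate: &configv1.IntermediateTLSProfile{},
			},
		},
	}
}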
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
deleted file mode 100644
index eecfe75e7..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_authentication.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Authentication specifies cluster-wide settings for authentication (like OAuth and
-// webhook token authenticators). The canonical name of an instance is `cluster`.
-type Authentication struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec AuthenticationSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status AuthenticationStatus `json:"status"`
-}
-
-type AuthenticationSpec struct {
- // type identifies the cluster managed, user facing authentication mode in use.
- // Specifically, it manages the component that responds to login attempts.
- // The default is IntegratedOAuth.
- // +optional
- Type AuthenticationType `json:"type"`
-
- // oauthMetadata contains the discovery endpoint data for OAuth 2.0
- // Authorization Server Metadata for an external OAuth server.
- // This discovery document can be viewed from its served location:
- // oc get --raw '/.well-known/oauth-authorization-server'
- // For further details, see the IETF Draft:
- // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- // If oauthMetadata.name is non-empty, this value has precedence
- // over any metadata reference stored in status.
- // The key "oauthMetadata" is used to locate the data.
- // If specified and the config map or expected key is not found, no metadata is served.
- // If the specified metadata is not valid, no metadata is served.
- // The namespace for this config map is openshift-config.
- // +optional
- OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"`
-
- // webhookTokenAuthenticators configures remote token reviewers.
- // These remote authentication webhooks can be used to verify bearer tokens
- // via the tokenreviews.authentication.k8s.io REST API. This is required to
- // honor bearer tokens that are provisioned by an external authentication service.
- // The namespace for these secrets is openshift-config.
- // +optional
- WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"`
-}
-
-type AuthenticationStatus struct {
- // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0
- // Authorization Server Metadata for the in-cluster integrated OAuth server.
- // This discovery document can be viewed from its served location:
- // oc get --raw '/.well-known/oauth-authorization-server'
- // For further details, see the IETF Draft:
- // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- // This contains the observed value based on cluster state.
- // An explicitly set value in spec.oauthMetadata has precedence over this field.
- // This field has no meaning if authentication spec.type is not set to IntegratedOAuth.
- // The key "oauthMetadata" is used to locate the data.
- // If the config map or expected key is not found, no metadata is served.
- // If the specified metadata is not valid, no metadata is served.
- // The namespace for this config map is openshift-config-managed.
- IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"`
-
- // TODO if we add support for an in-cluster operator managed Keycloak instance
- // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type AuthenticationList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Authentication `json:"items"`
-}
-
-type AuthenticationType string
-
-const (
- // None means that no cluster managed authentication system is in place.
- // Note that user login will only work if a manually configured system is in place and
- // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators.
- AuthenticationTypeNone AuthenticationType = "None"
-
- // IntegratedOAuth refers to the cluster managed OAuth server.
- // It is configured via the top level OAuth config.
- AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth"
-
- // TODO if we add support for an in-cluster operator managed Keycloak instance
- // AuthenticationTypeKeycloak AuthenticationType = "Keycloak"
-)
-
- // WebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator
-type WebhookTokenAuthenticator struct {
- // kubeConfig contains kube config file data which describes how to access the remote webhook service.
- // For further details, see:
- // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
- // The key "kubeConfig" is used to locate the data.
- // If the secret or expected key is not found, the webhook is not honored.
- // If the specified kube config data is not valid, the webhook is not honored.
- // The namespace for this secret is determined by the point of use.
- KubeConfig SecretNameReference `json:"kubeConfig"`
-}
-
-const (
- // OAuthMetadataKey is the key for the oauth authorization server metadata
- OAuthMetadataKey = "oauthMetadata"
-
- // KubeConfigKey is the key for the kube config file data in a secret
- KubeConfigKey = "kubeConfig"
-)
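Everything needed for a concrete example is defined in this file. A sketch of the `cluster` Authentication object (the secret name is a placeholder) that keeps the integrated OAuth server and adds one remote token reviewer, whose kubeconfig is read from the "kubeConfig" key per KubeConfigKey above:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// clusterAuthentication pairs IntegratedOAuth with a webhook token
// authenticator backed by a secret in openshift-config.
func clusterAuthentication() *configv1.Authentication {
	return &configv1.Authentication{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.AuthenticationSpec{
			Type: configv1.AuthenticationTypeIntegratedOAuth,
			WebhookTokenAuthenticators: []configv1.WebhookTokenAuthenticator{
				{KubeConfig: configv1.SecretNameReference{Name: "external-token-reviewer"}},
			},
		},
	}
}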
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
deleted file mode 100644
index ef4512aa1..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package v1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Build configures the behavior of OpenShift builds for the entire cluster.
-// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
-//
-// The canonical name is "cluster"
-type Build struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds user-settable values for the build controller configuration
- // +kubebuilder:validation:Required
- // +required
- Spec BuildSpec `json:"spec"`
-}
-
-type BuildSpec struct {
- // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted for image pushes and pulls during builds.
- // The namespace for this config map is openshift-config.
- //
- // DEPRECATED: Additional CAs for image pull and push should be set on
- // image.config.openshift.io/cluster instead.
- //
- // +optional
- AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
- // BuildDefaults controls the default information for Builds
- // +optional
- BuildDefaults BuildDefaults `json:"buildDefaults"`
- // BuildOverrides controls override settings for builds
- // +optional
- BuildOverrides BuildOverrides `json:"buildOverrides"`
-}
-
-type BuildDefaults struct {
- // DefaultProxy contains the default proxy settings for all build operations, including image pull/push
- // and source download.
- //
-	// Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
- // in the build config's strategy.
- // +optional
- DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
-
- // GitProxy contains the proxy settings for git operations only. If set, this will override
- // any Proxy settings for all git commands, such as git clone.
- //
- // Values that are not set here will be inherited from DefaultProxy.
- // +optional
- GitProxy *ProxySpec `json:"gitProxy,omitempty"`
-
- // Env is a set of default environment variables that will be applied to the
- // build if the specified variables do not exist on the build
- // +optional
- Env []corev1.EnvVar `json:"env,omitempty"`
-
- // ImageLabels is a list of docker labels that are applied to the resulting image.
- // User can override a default label by providing a label with the same name in their
- // Build/BuildConfig.
- // +optional
- ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
-
- // Resources defines resource requirements to execute the build.
- // +optional
- Resources corev1.ResourceRequirements `json:"resources"`
-}
-
-type ImageLabel struct {
- // Name defines the name of the label. It must have non-zero length.
- Name string `json:"name"`
-
- // Value defines the literal value of the label.
- // +optional
- Value string `json:"value,omitempty"`
-}
-
-type BuildOverrides struct {
- // ImageLabels is a list of docker labels that are applied to the resulting image.
- // If user provided a label in their Build/BuildConfig with the same name as one in this
- // list, the user's label will be overwritten.
- // +optional
- ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
-
- // NodeSelector is a selector which must be true for the build pod to fit on a node
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Tolerations is a list of Tolerations that will override any existing
- // tolerations set on a build pod.
- // +optional
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type BuildList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Build `json:"items"`
-}
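
A hedged illustration of how these build defaults compose; ProxySpec is defined elsewhere in this package (types_proxy.go), and all values below are invented:

defaults := configv1.BuildDefaults{
	// Inherited unless the BuildConfig strategy sets HTTP_PROXY/HTTPS_PROXY/NO_PROXY.
	DefaultProxy: &configv1.ProxySpec{
		HTTPProxy:  "http://proxy.example.com:3128",
		HTTPSProxy: "http://proxy.example.com:3128",
		NoProxy:    ".cluster.local",
	},
	// Applied only when the build does not define the variable itself.
	Env: []corev1.EnvVar{{Name: "GIT_COMMITTER_NAME", Value: "builder"}},
	// A Build/BuildConfig label with the same name wins over this default.
	ImageLabels: []configv1.ImageLabel{{Name: "io.example.owner", Value: "build-team"}},
}
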
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
deleted file mode 100644
index 3681d0ff0..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterOperator is the Custom Resource object which holds the current state
-// of an operator. This object is used by operators to convey their state to
-// the rest of the cluster.
-type ClusterOperator struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // spec holds configuration that could apply to any operator.
- // +kubebuilder:validation:Required
- // +required
- Spec ClusterOperatorSpec `json:"spec"`
-
- // status holds the information about the state of an operator. It is consistent with status information across
- // the Kubernetes ecosystem.
- // +optional
- Status ClusterOperatorStatus `json:"status"`
-}
-
-// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
-type ClusterOperatorSpec struct {
-}
-
-// ClusterOperatorStatus provides information about the status of the operator.
-// +k8s:deepcopy-gen=true
-type ClusterOperatorStatus struct {
- // conditions describes the state of the operator's managed and monitored components.
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +optional
- Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
-
- // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
- // operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
- // An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
- // +optional
- Versions []OperandVersion `json:"versions,omitempty"`
-
- // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
- // 1. the detailed resource driving the operator
- // 2. operator namespaces
- // 3. operand namespaces
- // +optional
- RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
-
- // extension contains any additional status information specific to the
- // operator which owns this status object.
- // +nullable
- // +optional
- // +kubebuilder:pruning:PreserveUnknownFields
- Extension runtime.RawExtension `json:"extension"`
-}
-
-type OperandVersion struct {
- // name is the name of the particular operand this version is for. It usually matches container images, not operators.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-
- // version indicates which version of a particular operand is currently being managed. It must always match the Available
-	// operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out
-	// 1.1.0.
- // +kubebuilder:validation:Required
- // +required
- Version string `json:"version"`
-}
-
-// ObjectReference contains enough information to let you inspect or modify the referred object.
-type ObjectReference struct {
- // group of the referent.
- // +kubebuilder:validation:Required
- // +required
- Group string `json:"group"`
- // resource of the referent.
- // +kubebuilder:validation:Required
- // +required
- Resource string `json:"resource"`
- // namespace of the referent.
- // +optional
- Namespace string `json:"namespace,omitempty"`
- // name of the referent.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-type ConditionStatus string
-
-// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
-// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
-// can't decide if a resource is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
- ConditionTrue ConditionStatus = "True"
- ConditionFalse ConditionStatus = "False"
- ConditionUnknown ConditionStatus = "Unknown"
-)
-
-// ClusterOperatorStatusCondition represents the state of the operator's
-// managed and monitored components.
-// +k8s:deepcopy-gen=true
-type ClusterOperatorStatusCondition struct {
- // type specifies the aspect reported by this condition.
- // +kubebuilder:validation:Required
- // +required
- Type ClusterStatusConditionType `json:"type"`
-
- // status of the condition, one of True, False, Unknown.
- // +kubebuilder:validation:Required
- // +required
- Status ConditionStatus `json:"status"`
-
- // lastTransitionTime is the time of the last update to the current status property.
- // +kubebuilder:validation:Required
- // +required
- LastTransitionTime metav1.Time `json:"lastTransitionTime"`
-
- // reason is the CamelCase reason for the condition's current status.
- // +optional
- Reason string `json:"reason,omitempty"`
-
- // message provides additional information about the current condition.
- // This is only to be consumed by humans.
- // +optional
- Message string `json:"message,omitempty"`
-}
-
-// ClusterStatusConditionType is an aspect of operator state.
-type ClusterStatusConditionType string
-
-const (
-	// Available indicates that the operand (e.g. openshift-apiserver for the
-	// openshift-apiserver-operator) is functional and available in the cluster.
- OperatorAvailable ClusterStatusConditionType = "Available"
-
- // Progressing indicates that the operator is actively rolling out new code,
- // propagating config changes, or otherwise moving from one steady state to
- // another. Operators should not report progressing when they are reconciling
- // a previously known state.
- OperatorProgressing ClusterStatusConditionType = "Progressing"
-
- // Degraded indicates that the operator's current state does not match its
- // desired state over a period of time resulting in a lower quality of service.
- // The period of time may vary by component, but a Degraded state represents
- // persistent observation of a condition. As a result, a component should not
-	// oscillate in and out of Degraded state. A service may be Available even
-	// if it is degraded. For example, your service may desire 3 running pods, but 1
- // pod is crash-looping. The service is Available but Degraded because it
- // may have a lower quality of service. A component may be Progressing but
- // not Degraded because the transition from one state to another does not
- // persist over a long enough period to report Degraded. A service should not
- // report Degraded during the course of a normal upgrade. A service may report
- // Degraded in response to a persistent infrastructure failure that requires
- // administrator intervention. For example, if a control plane host is unhealthy
- // and must be replaced. An operator should report Degraded if unexpected
- // errors occur over a period, but the expectation is that all unexpected errors
- // are handled as operators mature.
- OperatorDegraded ClusterStatusConditionType = "Degraded"
-
- // Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`
- // administrators should not upgrade their cluster and the message field should contain a human readable description
- // of what the administrator should do to allow the operator to successfully update. A missing condition, True,
- // and Unknown are all treated by the CVO as allowing an upgrade.
- OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
-)
-
-// ClusterOperatorList is a list of OperatorStatus resources.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type ClusterOperatorList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterOperator `json:"items"`
-}
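
A minimal sketch of the lookup pattern operators typically write against this status (the helper name is made up; configv1 is github.com/openshift/api/config/v1):

// findStatusCondition returns the condition of the given type, or nil if absent.
func findStatusCondition(conds []configv1.ClusterOperatorStatusCondition, t configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

A caller would, for example, treat a nil or non-True Available condition as "operator not ready" before acting on the cluster.
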
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
deleted file mode 100644
index 771e962ad..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
-// parameters related to automatic updates can be set.
-type ClusterVersion struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec is the desired state of the cluster version - the operator will work
- // to ensure that the desired version is applied to the cluster.
- // +kubebuilder:validation:Required
- // +required
- Spec ClusterVersionSpec `json:"spec"`
- // status contains information about the available updates and any in-progress
- // updates.
- // +optional
- Status ClusterVersionStatus `json:"status"`
-}
-
-// ClusterVersionSpec is the desired version state of the cluster. It includes
-// the version the cluster should be at, how the cluster is identified, and
-// where the cluster should look for version updates.
-// +k8s:deepcopy-gen=true
-type ClusterVersionSpec struct {
- // clusterID uniquely identifies this cluster. This is expected to be
- // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
- // hexadecimal values). This is a required field.
- // +kubebuilder:validation:Required
- // +required
- ClusterID ClusterID `json:"clusterID"`
-
- // desiredUpdate is an optional field that indicates the desired value of
- // the cluster version. Setting this value will trigger an upgrade (if
- // the current version does not match the desired version). The set of
- // recommended update values is listed as part of available updates in
- // status, and setting values outside that range may cause the upgrade
- // to fail. You may specify the version field without setting image if
- // an update exists with that version in the availableUpdates or history.
- //
- // If an upgrade fails the operator will halt and report status
- // about the failing component. Setting the desired update value back to
- // the previous version will cause a rollback to be attempted. Not all
- // rollbacks will succeed.
- //
- // +optional
- DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
-
- // upstream may be used to specify the preferred update server. By default
- // it will use the appropriate update server for the cluster and region.
- //
- // +optional
- Upstream URL `json:"upstream,omitempty"`
- // channel is an identifier for explicitly requesting that a non-default
-	// set of updates be applied to this cluster. The default channel will
-	// contain stable updates that are appropriate for production clusters.
- //
- // +optional
- Channel string `json:"channel,omitempty"`
-
-	// overrides is a list of overrides for components that are managed by
-	// the cluster version operator. Marking a component unmanaged will prevent
- // the operator from creating or updating the object.
- // +optional
- Overrides []ComponentOverride `json:"overrides,omitempty"`
-}
-
-// ClusterVersionStatus reports the status of the cluster versioning,
-// including any upgrades that are in progress. The current field will
-// be set to whichever version the cluster is reconciling to, and the
-// conditions array will report whether the update succeeded, is in
-// progress, or is failing.
-// +k8s:deepcopy-gen=true
-type ClusterVersionStatus struct {
- // desired is the version that the cluster is reconciling towards.
- // If the cluster is not yet fully initialized desired will be set
- // with the information available, which may be an image or a tag.
- // +kubebuilder:validation:Required
- // +required
- Desired Update `json:"desired"`
-
- // history contains a list of the most recent versions applied to the cluster.
- // This value may be empty during cluster startup, and then will be updated
- // when a new update is being applied. The newest update is first in the
- // list and it is ordered by recency. Updates in the history have state
- // Completed if the rollout completed - if an update was failing or halfway
-	// applied, the state will be Partial. Only a limited amount of update history
- // is preserved.
- // +optional
- History []UpdateHistory `json:"history,omitempty"`
-
- // observedGeneration reports which version of the spec is being synced.
- // If this value is not equal to metadata.generation, then the desired
- // and conditions fields may represent a previous version.
- // +kubebuilder:validation:Required
- // +required
- ObservedGeneration int64 `json:"observedGeneration"`
-
- // versionHash is a fingerprint of the content that the cluster will be
- // updated with. It is used by the operator to avoid unnecessary work
- // and is for internal use only.
- // +kubebuilder:validation:Required
- // +required
- VersionHash string `json:"versionHash"`
-
- // conditions provides information about the cluster version. The condition
- // "Available" is set to true if the desiredUpdate has been reached. The
- // condition "Progressing" is set to true if an update is being applied.
- // The condition "Degraded" is set to true if an update is currently blocked
- // by a temporary or permanent error. Conditions are only valid for the
- // current desiredUpdate when metadata.generation is equal to
- // status.generation.
- // +optional
- Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"`
-
- // availableUpdates contains the list of updates that are appropriate
- // for this cluster. This list may be empty if no updates are recommended,
- // if the update service is unavailable, or if an invalid channel has
- // been specified.
- // +nullable
- // +kubebuilder:validation:Required
- // +required
- AvailableUpdates []Update `json:"availableUpdates"`
-}
-
-// UpdateState is a constant representing whether an update was successfully
-// applied to the cluster or not.
-type UpdateState string
-
-const (
- // CompletedUpdate indicates an update was successfully applied
- // to the cluster (all resource updates were successful).
- CompletedUpdate UpdateState = "Completed"
- // PartialUpdate indicates an update was never completely applied
- // or is currently being applied.
- PartialUpdate UpdateState = "Partial"
-)
-
-// UpdateHistory is a single attempted update to the cluster.
-type UpdateHistory struct {
- // state reflects whether the update was fully applied. The Partial state
- // indicates the update is not fully applied, while the Completed state
- // indicates the update was successfully rolled out at least once (all
- // parts of the update successfully applied).
- // +kubebuilder:validation:Required
- // +required
- State UpdateState `json:"state"`
-
- // startedTime is the time at which the update was started.
- // +kubebuilder:validation:Required
- // +required
- StartedTime metav1.Time `json:"startedTime"`
- // completionTime, if set, is when the update was fully applied. The update
- // that is currently being applied will have a null completion time.
- // Completion time will always be set for entries that are not the current
- // update (usually to the started time of the next update).
- // +kubebuilder:validation:Required
- // +required
- // +nullable
- CompletionTime *metav1.Time `json:"completionTime"`
-
-	// version is a semantic version identifying the update version. If the
- // requested image does not define a version, or if a failure occurs
- // retrieving the image, this value may be empty.
- //
- // +optional
- Version string `json:"version"`
- // image is a container image location that contains the update. This value
- // is always populated.
- // +kubebuilder:validation:Required
- // +required
- Image string `json:"image"`
- // verified indicates whether the provided update was properly verified
- // before it was installed. If this is false the cluster may not be trusted.
- // +kubebuilder:validation:Required
- // +required
- Verified bool `json:"verified"`
-}
-
-// ClusterID is a string containing an RFC4122 UUID.
-type ClusterID string
-
-// ComponentOverride allows overriding cluster version operator's behavior
-// for a component.
-// +k8s:deepcopy-gen=true
-type ComponentOverride struct {
-	// kind identifies which object to override.
- // +kubebuilder:validation:Required
- // +required
- Kind string `json:"kind"`
- // group identifies the API group that the kind is in.
- // +kubebuilder:validation:Required
- // +required
- Group string `json:"group"`
-
- // namespace is the component's namespace. If the resource is cluster
- // scoped, the namespace should be empty.
- // +kubebuilder:validation:Required
- // +required
- Namespace string `json:"namespace"`
- // name is the component's name.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-
- // unmanaged controls if cluster version operator should stop managing the
- // resources in this cluster.
- // Default: false
- // +kubebuilder:validation:Required
- // +required
- Unmanaged bool `json:"unmanaged"`
-}
-
-// URL is a thin wrapper around string that ensures the string is a valid URL.
-type URL string
-
-// Update represents a release of the ClusterVersionOperator, referenced by the
-// Image member.
-// +k8s:deepcopy-gen=true
-type Update struct {
-	// version is a semantic version identifying the update version. When this
- // field is part of spec, version is optional if image is specified.
- //
- // +optional
- Version string `json:"version"`
- // image is a container image location that contains the update. When this
- // field is part of spec, image is optional if version is specified and the
- // availableUpdates field contains a matching version.
- //
- // +optional
- Image string `json:"image"`
- // force allows an administrator to update to an image that has failed
- // verification, does not appear in the availableUpdates list, or otherwise
- // would be blocked by normal protections on update. This option should only
- // be used when the authenticity of the provided image has been verified out
- // of band because the provided image will run with full administrative access
-	// to the cluster. Do not use this flag with images that come from unknown
- // or potentially malicious sources.
- //
- // This flag does not override other forms of consistency checking that are
- // required before a new update is deployed.
- //
- // +optional
- Force bool `json:"force"`
-}
-
-// RetrievedUpdates reports whether available updates have been retrieved from
-// the upstream update server. The condition is Unknown before retrieval, False
-// if the updates could not be retrieved or recently failed, or True if the
-// availableUpdates field is accurate and recent.
-const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
-
-// ClusterVersionList is a list of ClusterVersion resources.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type ClusterVersionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterVersion `json:"items"`
-}
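
Illustrative only: per the desiredUpdate docs, an upgrade can be requested by version alone when a matching entry exists in availableUpdates or history (the version string is invented):

cv.Spec.DesiredUpdate = &configv1.Update{
	Version: "4.3.5",
	// Force: true would bypass verification and the availableUpdates check;
	// the field docs above reserve that for images verified out of band.
}
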
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
deleted file mode 100644
index 22b0b5160..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Console holds cluster-wide configuration for the web console, including the
-// logout URL, and reports the public URL of the console. The canonical name is
-// `cluster`.
-type Console struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ConsoleSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ConsoleStatus `json:"status"`
-}
-
-// ConsoleSpec is the specification of the desired behavior of the Console.
-type ConsoleSpec struct {
- // +optional
- Authentication ConsoleAuthentication `json:"authentication"`
-}
-
-// ConsoleStatus defines the observed status of the Console.
-type ConsoleStatus struct {
- // The URL for the console. This will be derived from the host for the route that
- // is created for the console.
- ConsoleURL string `json:"consoleURL"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ConsoleList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Console `json:"items"`
-}
-
-// ConsoleAuthentication defines a list of optional configuration for console authentication.
-type ConsoleAuthentication struct {
- // An optional, absolute URL to redirect web browsers to after logging out of
- // the console. If not specified, it will redirect to the default login page.
- // This is required when using an identity provider that supports single
- // sign-on (SSO) such as:
- // - OpenID (Keycloak, Azure)
- // - RequestHeader (GSSAPI, SSPI, SAML)
- // - OAuth (GitHub, GitLab, Google)
- // Logging out of the console will destroy the user's token. The logoutRedirect
- // provides the user the option to perform single logout (SLO) through the identity
- // provider to destroy their single sign-on session.
- // +optional
- // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$`
- LogoutRedirect string `json:"logoutRedirect,omitempty"`
-}
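
A hypothetical Console object wiring up single logout for an SSO identity provider (the URL is a placeholder):

console := configv1.Console{
	ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical name
	Spec: configv1.ConsoleSpec{
		Authentication: configv1.ConsoleAuthentication{
			LogoutRedirect: "https://idp.example.com/logout",
		},
	},
}
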
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
deleted file mode 100644
index 989ef99c3..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
-type DNS struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec DNSSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status DNSStatus `json:"status"`
-}
-
-type DNSSpec struct {
- // baseDomain is the base domain of the cluster. All managed DNS records will
- // be sub-domains of this base.
- //
- // For example, given the base domain `openshift.example.com`, an API server
- // DNS record may be created for `cluster-api.openshift.example.com`.
- //
- // Once set, this field cannot be changed.
- BaseDomain string `json:"baseDomain"`
- // publicZone is the location where all the DNS records that are publicly accessible to
- // the internet exist.
- //
- // If this field is nil, no public records should be created.
- //
- // Once set, this field cannot be changed.
- //
- // +optional
- PublicZone *DNSZone `json:"publicZone,omitempty"`
- // privateZone is the location where all the DNS records that are only available internally
- // to the cluster exist.
- //
- // If this field is nil, no private records should be created.
- //
- // Once set, this field cannot be changed.
- //
- // +optional
- PrivateZone *DNSZone `json:"privateZone,omitempty"`
-}
-
-// DNSZone is used to define a DNS hosted zone.
-// A zone can be identified by an ID or tags.
-type DNSZone struct {
- // id is the identifier that can be used to find the DNS hosted zone.
- //
-	// on AWS, the zone can be fetched using `ID` as the id in [1];
-	// on Azure, the zone can be fetched using `ID` as a pre-determined name in [2];
-	// on GCP, the zone can be fetched using `ID` as a pre-determined name in [3].
- //
- // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
- // +optional
- ID string `json:"id,omitempty"`
-
- // tags can be used to query the DNS hosted zone.
- //
-	// on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.
- //
- // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
- // +optional
- Tags map[string]string `json:"tags,omitempty"`
-}
-
-type DNSStatus struct {
- // dnsSuffix (service-ca amongst others)
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type DNSList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []DNS `json:"items"`
-}
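
The two ways of identifying a DNSZone described above, written out (both values are made up):

public := configv1.DNSZone{ID: "Z3URY6TWQ91KVV"} // provider-assigned zone ID
private := configv1.DNSZone{Tags: map[string]string{ // tag filters instead of an ID
	"kubernetes.io/cluster/mycluster": "owned",
}}
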
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
deleted file mode 100644
index ce9012627..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`
-type FeatureGate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec FeatureGateSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status FeatureGateStatus `json:"status"`
-}
-
-type FeatureSet string
-
-var (
- // Default feature set that allows upgrades.
- Default FeatureSet = ""
-
- // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
- // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
- TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
-
- // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
- // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
- // your cluster may fail in an unrecoverable way.
- CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
-
-	// LatencySensitive enables TopologyManager support. Upgrades are enabled with this feature set.
- LatencySensitive FeatureSet = "LatencySensitive"
-)
-
-type FeatureGateSpec struct {
- FeatureGateSelection `json:",inline"`
-}
-
-// +union
-type FeatureGateSelection struct {
- // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
- // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
- // +unionDiscriminator
- // +optional
- FeatureSet FeatureSet `json:"featureSet,omitempty"`
-
- // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
- // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
-	// your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
- // +optional
- // +nullable
- CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
-}
-
-type CustomFeatureGates struct {
- // enabled is a list of all feature gates that you want to force on
- // +optional
- Enabled []string `json:"enabled,omitempty"`
- // disabled is a list of all feature gates that you want to force off
- // +optional
- Disabled []string `json:"disabled,omitempty"`
-}
-
-type FeatureGateStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type FeatureGateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []FeatureGate `json:"items"`
-}
-
-type FeatureGateEnabledDisabled struct {
- Enabled []string
- Disabled []string
-}
-
-// FeatureSets contains a map of FeatureSet names to the feature gates they enable and disable.
-//
-// NOTE: The caller needs to make sure to check for the existence of the value
-// using golang's comma-ok idiom. A possible scenario is an upgrade where new
-// FeatureSets are added and a controller has not been upgraded with a newer
-// version of this file. In this upgrade scenario the map could return nil.
-//
-// example:
-// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
-//
-// If you put an item in either of these lists, put your area and name on it so we can find owners.
-var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
- Default: defaultFeatures,
- CustomNoUpgrade: {
- Enabled: []string{},
- Disabled: []string{},
- },
- TechPreviewNoUpgrade: newDefaultFeatures().toFeatures(),
- LatencySensitive: newDefaultFeatures().
- with(
- "TopologyManager", // sig-pod, sjenning
- ).
- toFeatures(),
-}
-
-var defaultFeatures = &FeatureGateEnabledDisabled{
- Enabled: []string{
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- "NodeDisruptionExclusion", // sig-scheduling, ccoleman
- "ServiceNodeExclusion", // sig-scheduling, ccoleman
- "SCTPSupport", // sig-network, ccallend
- },
- Disabled: []string{
- "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman
- },
-}
-
-type featureSetBuilder struct {
- forceOn []string
- forceOff []string
-}
-
-func newDefaultFeatures() *featureSetBuilder {
- return &featureSetBuilder{}
-}
-
-func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder {
- f.forceOn = append(f.forceOn, forceOn...)
- return f
-}
-
-func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder {
- f.forceOff = append(f.forceOff, forceOff...)
- return f
-}
-
-func (f *featureSetBuilder) isForcedOff(needle string) bool {
- for _, forcedOff := range f.forceOff {
- if needle == forcedOff {
- return true
- }
- }
- return false
-}
-
-func (f *featureSetBuilder) isForcedOn(needle string) bool {
- for _, forceOn := range f.forceOn {
- if needle == forceOn {
- return true
- }
- }
- return false
-}
-
-func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled {
- finalOn := []string{}
- finalOff := []string{}
-
- // only add the default enabled features if they haven't been explicitly set off
- for _, defaultOn := range defaultFeatures.Enabled {
- if !f.isForcedOff(defaultOn) {
- finalOn = append(finalOn, defaultOn)
- }
- }
- for _, currOn := range f.forceOn {
- if f.isForcedOff(currOn) {
- panic("coding error, you can't have features both on and off")
- }
- finalOn = append(finalOn, currOn)
- }
-
- // only add the default disabled features if they haven't been explicitly set on
- for _, defaultOff := range defaultFeatures.Disabled {
- if !f.isForcedOn(defaultOff) {
- finalOff = append(finalOff, defaultOff)
- }
- }
- for _, currOff := range f.forceOff {
- finalOff = append(finalOff, currOff)
- }
-
- return &FeatureGateEnabledDisabled{
- Enabled: finalOn,
- Disabled: finalOff,
- }
-}
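
The comma-ok lookup that the FeatureSets comment asks for, spelled out as a sketch (assumes fmt and configv1 for github.com/openshift/api/config/v1):

if fs, ok := configv1.FeatureSets[configv1.TechPreviewNoUpgrade]; ok {
	for _, gate := range fs.Enabled {
		fmt.Println("enabled:", gate)
	}
} // a missing key can happen when this copy of the file is older than the requested FeatureSet
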
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
deleted file mode 100644
index bf594c1b7..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Image governs policies related to imagestream imports and runtime configuration
-// for external registries. It allows cluster admins to configure which registries
-// OpenShift is allowed to import images from, extra CA trust bundles for external
-// registries, and policies to blacklist/whitelist registry hostnames.
-// When exposing OpenShift's image registry to the public, this also lets cluster
-// admins specify the external hostname.
-type Image struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ImageSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ImageStatus `json:"status"`
-}
-
-type ImageSpec struct {
- // allowedRegistriesForImport limits the container image registries that normal users may import
- // images from. Set this list to the registries that you trust to contain valid Docker
- // images and that you want applications to be able to import from. Users with
- // permission to create Images or ImageStreamMappings via the API are not affected by
- // this policy - typically only administrators or system integrations will have those
- // permissions.
- // +optional
- AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"`
-
- // externalRegistryHostnames provides the hostnames for the default external image
- // registry. The external hostname should be set only when the image registry
- // is exposed externally. The first value is used in 'publicDockerImageRepository'
- // field in ImageStreams. The value must be in "hostname[:port]" format.
- // +optional
- ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
-
- // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted during imagestream import, pod image pull, build image pull, and
- // imageregistry pullthrough.
- // The namespace for this config map is openshift-config.
- // +optional
- AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
-
- // registrySources contains configuration that determines how the container runtime
- // should treat individual registries when accessing images for builds+pods. (e.g.
- // whether or not to allow insecure access). It does not contain configuration for the
- // internal cluster registry.
- // +optional
- RegistrySources RegistrySources `json:"registrySources"`
-}
-
-type ImageStatus struct {
-
- // internalRegistryHostname sets the hostname for the default internal image
- // registry. The value must be in "hostname[:port]" format.
- // This value is set by the image registry operator which controls the internal registry
-	// hostname. For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY
-	// environment variable, but this setting overrides the environment variable.
- // +optional
- InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
-
- // externalRegistryHostnames provides the hostnames for the default external image
- // registry. The external hostname should be set only when the image registry
- // is exposed externally. The first value is used in 'publicDockerImageRepository'
- // field in ImageStreams. The value must be in "hostname[:port]" format.
- // +optional
- ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ImageList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Image `json:"items"`
-}
-
-// RegistryLocation contains a location of the registry specified by the registry domain
-// name. The domain name might include wildcards, like '*' or '??'.
-type RegistryLocation struct {
- // domainName specifies a domain name for the registry
-	// If the registry uses a non-standard port (other than 80 or 443), the port should be included
- // in the domain name as well.
- DomainName string `json:"domainName"`
- // insecure indicates whether the registry is secure (https) or insecure (http)
- // By default (if not specified) the registry is assumed as secure.
- // +optional
- Insecure bool `json:"insecure,omitempty"`
-}
-
-// RegistrySources holds cluster-wide information about how to handle the registries config.
-type RegistrySources struct {
-	// insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.
- // +optional
- InsecureRegistries []string `json:"insecureRegistries,omitempty"`
- // blockedRegistries are blacklisted from image pull/push. All other registries are allowed.
- //
- // Only one of BlockedRegistries or AllowedRegistries may be set.
- // +optional
- BlockedRegistries []string `json:"blockedRegistries,omitempty"`
- // allowedRegistries are whitelisted for image pull/push. All other registries are blocked.
- //
- // Only one of BlockedRegistries or AllowedRegistries may be set.
- // +optional
- AllowedRegistries []string `json:"allowedRegistries,omitempty"`
-}
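
An invented example of registrySources configuration; note the docs above make blockedRegistries and allowedRegistries mutually exclusive:

spec := configv1.ImageSpec{
	RegistrySources: configv1.RegistrySources{
		InsecureRegistries: []string{"registry.dev.example.com:5000"},
		BlockedRegistries:  []string{"untrusted.example.com"},
		// AllowedRegistries must stay unset when BlockedRegistries is used.
	},
}
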
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
deleted file mode 100644
index ac1e5048e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
-type Infrastructure struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec InfrastructureSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status InfrastructureStatus `json:"status"`
-}
-
-// InfrastructureSpec contains settings that apply to the cluster infrastructure.
-type InfrastructureSpec struct {
- // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
- // This configuration file is used to configure the Kubernetes cloud provider integration
- // when using the built-in cloud provider integration or the external cloud controller manager.
- // The namespace for this config map is openshift-config.
- // +optional
- CloudConfig ConfigMapFileReference `json:"cloudConfig"`
-}
-
-// InfrastructureStatus describes the infrastructure the cluster is leveraging.
-type InfrastructureStatus struct {
-	// infrastructureName uniquely identifies a cluster with a human-friendly name.
-	// Once set, it should not be changed. It must be at most 27 characters long and
-	// contain only alphanumeric or hyphen characters.
- InfrastructureName string `json:"infrastructureName"`
-
- // platform is the underlying infrastructure provider for the cluster.
- //
- // Deprecated: Use platformStatus.type instead.
- Platform PlatformType `json:"platform,omitempty"`
-
- // platformStatus holds status information specific to the underlying
- // infrastructure provider.
- // +optional
- PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
-
- // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
- // etcd servers and clients.
- // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
- EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`
-
-	// apiServerURL is a valid URI with scheme (http/https), address, and
- // port. apiServerURL can be used by components like the web console
- // to tell users where to find the Kubernetes API.
- APIServerURL string `json:"apiServerURL"`
-
-	// apiServerInternalURL is a valid URI with scheme (http/https),
- // address and port. apiServerInternalURL can be used by components
- // like kubelets, to contact the Kubernetes API server using the
- // infrastructure provider rather than Kubernetes networking.
- APIServerInternalURL string `json:"apiServerInternalURI"`
-}
-
-// PlatformType is a specific supported infrastructure provider.
-type PlatformType string
-
-const (
- // AWSPlatformType represents Amazon Web Services infrastructure.
- AWSPlatformType PlatformType = "AWS"
-
- // AzurePlatformType represents Microsoft Azure infrastructure.
- AzurePlatformType PlatformType = "Azure"
-
- // BareMetalPlatformType represents managed bare metal infrastructure.
- BareMetalPlatformType PlatformType = "BareMetal"
-
- // GCPPlatformType represents Google Cloud Platform infrastructure.
- GCPPlatformType PlatformType = "GCP"
-
- // LibvirtPlatformType represents libvirt infrastructure.
- LibvirtPlatformType PlatformType = "Libvirt"
-
- // OpenStackPlatformType represents OpenStack infrastructure.
- OpenStackPlatformType PlatformType = "OpenStack"
-
- // NonePlatformType means there is no infrastructure provider.
- NonePlatformType PlatformType = "None"
-
- // VSpherePlatformType represents VMWare vSphere infrastructure.
- VSpherePlatformType PlatformType = "VSphere"
-
- // OvirtPlatformType represents oVirt/RHV infrastructure.
- OvirtPlatformType PlatformType = "oVirt"
-)
-
-// PlatformStatus holds the current status specific to the underlying infrastructure provider
-// of the current cluster. Since these are used at status-level for the underlying cluster,
-// only one of the provider-specific status structs is expected to be set.
-type PlatformStatus struct {
- // type is the underlying infrastructure provider for the cluster. This
- // value controls whether infrastructure automation such as service load
- // balancers, dynamic volume provisioning, machine creation and deletion, and
- // other integrations are enabled. If None, no infrastructure automation is
- // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
- // "OpenStack", "VSphere", "oVirt", and "None". Individual components may not support
- // all platforms, and must handle unrecognized platforms as None if they do
- // not support that platform.
- Type PlatformType `json:"type"`
-
- // AWS contains settings specific to the Amazon Web Services infrastructure provider.
- // +optional
- AWS *AWSPlatformStatus `json:"aws,omitempty"`
-
- // Azure contains settings specific to the Azure infrastructure provider.
- // +optional
- Azure *AzurePlatformStatus `json:"azure,omitempty"`
-
- // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
- // +optional
- GCP *GCPPlatformStatus `json:"gcp,omitempty"`
-
- // BareMetal contains settings specific to the BareMetal platform.
- // +optional
- BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
-
- // OpenStack contains settings specific to the OpenStack infrastructure provider.
- // +optional
- OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
-
- // Ovirt contains settings specific to the oVirt infrastructure provider.
- // +optional
- Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
-}
-
-// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
-type AWSPlatformStatus struct {
- // region holds the default AWS region for new AWS resources created by the cluster.
- Region string `json:"region"`
-}
-
-// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
-type AzurePlatformStatus struct {
- // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
- ResourceGroupName string `json:"resourceGroupName"`
-
- // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
- // If empty, the value is same as ResourceGroupName.
- // +optional
- NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
-}
-
-// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
-type GCPPlatformStatus struct {
-	// projectID is the Project ID for new GCP resources created for the cluster.
- ProjectID string `json:"projectID"`
-
- // region holds the region for new GCP resources created for the cluster.
- Region string `json:"region"`
-}
-
-// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
-// For more information about the network architecture used with the BareMetal platform type, see:
-// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
-type BareMetalPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // BareMetal deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.
-type OpenStackPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // cloudName is the name of the desired OpenStack cloud in the
- // client configuration file (`clouds.yaml`).
- CloudName string `json:"cloudName,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // OpenStack deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.
-type OvirtPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // oVirt deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// InfrastructureList is a list of Infrastructure resources.
-type InfrastructureList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Infrastructure `json:"items"`
-}
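
A sketch of the consumption pattern the PlatformStatus docs imply, including treating unrecognized platforms like None (the function name is invented):

// clusterRegion returns the provider region when the platform reports one.
func clusterRegion(infra *configv1.Infrastructure) string {
	ps := infra.Status.PlatformStatus
	if ps == nil {
		return ""
	}
	switch ps.Type {
	case configv1.AWSPlatformType:
		if ps.AWS != nil {
			return ps.AWS.Region
		}
	case configv1.GCPPlatformType:
		if ps.GCP != nil {
			return ps.GCP.Region
		}
	}
	return "" // None, an unrecognized platform, or a missing provider struct
}
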
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
deleted file mode 100644
index 0216919ad..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Ingress holds cluster-wide information about ingress, including the default ingress domain
-// used for routes. The canonical name is `cluster`.
-type Ingress struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec IngressSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status IngressStatus `json:"status"`
-}
-
-type IngressSpec struct {
- // domain is used to generate a default host name for a route when the
- // route's host name is empty. The generated host name will follow this
- // pattern: "<route-name>.<route-namespace>.<domain>".
- //
- // It is also used as the default wildcard domain suffix for ingress. The
- // default ingresscontroller domain will follow this pattern: "*.<domain>".
- //
- // Once set, changing domain is not currently supported.
- Domain string `json:"domain"`
-}
-
-type IngressStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type IngressList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Ingress `json:"items"`
-}
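
The default host pattern from the domain docs, written as a tiny helper (illustrative; the names are placeholders and fmt is assumed imported):

// defaultRouteHost renders "<route-name>.<route-namespace>.<domain>".
func defaultRouteHost(routeName, routeNamespace string, ing configv1.Ingress) string {
	return fmt.Sprintf("%s.%s.%s", routeName, routeNamespace, ing.Spec.Domain)
}
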
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
deleted file mode 100644
index a09c5fe8e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
-// Please view network.spec for an explanation of what applies
-type Network struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration.
- // As a general rule, this SHOULD NOT be read directly. Instead, you should
- // consume the NetworkStatus, as it indicates the currently deployed configuration.
- // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
- // +kubebuilder:validation:Required
- // +required
- Spec NetworkSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status NetworkStatus `json:"status"`
-}
-
-// NetworkSpec is the desired network configuration.
-// As a general rule, this SHOULD NOT be read directly. Instead, you should
-// consume the NetworkStatus, as it indicates the currently deployed configuration.
-// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
-type NetworkSpec struct {
- // IP address pool to use for pod IPs.
- // This field is immutable after installation.
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
-
- // IP address pool for services.
- // Currently, we only support a single entry here.
- // This field is immutable after installation.
- ServiceNetwork []string `json:"serviceNetwork"`
-
- // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
- // This should match a value that the cluster-network-operator understands,
- // or else no networking will be installed.
- // Currently supported values are:
- // - OpenShiftSDN
- // This field is immutable after installation.
- NetworkType string `json:"networkType"`
-
- // externalIP defines configuration for controllers that
- // affect Service.ExternalIP. If nil, then ExternalIP is
- // not allowed to be set.
- // +optional
- ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
-}
-
-// NetworkStatus is the current network configuration.
-type NetworkStatus struct {
- // IP address pool to use for pod IPs.
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
-
- // IP address pool for services.
- // Currently, we only support a single entry here.
- ServiceNetwork []string `json:"serviceNetwork,omitempty"`
-
- // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
- NetworkType string `json:"networkType,omitempty"`
-
- // ClusterNetworkMTU is the MTU for inter-pod networking.
- ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
-}
-
-// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
-// are allocated.
-type ClusterNetworkEntry struct {
- // The complete block for pod IPs.
- CIDR string `json:"cidr"`
-
- // The size (prefix) of block to allocate to each node.
- // +kubebuilder:validation:Minimum=0
- HostPrefix uint32 `json:"hostPrefix"`
-}
-
-// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
-// of a Service resource.
-type ExternalIPConfig struct {
- // policy is a set of restrictions applied to the ExternalIP field.
- // If nil or empty, then ExternalIP is not allowed to be set.
- // +optional
- Policy *ExternalIPPolicy `json:"policy,omitempty"`
-
- // autoAssignCIDRs is a list of CIDRs from which to automatically assign
- // Service.ExternalIP. These are assigned when the service is of type
- // LoadBalancer. In general, this is only useful for bare-metal clusters.
-	// In OpenShift 3.x, this was misleadingly called "IngressIPs".
- // Automatically assigned External IPs are not affected by any
- // ExternalIPPolicy rules.
- // Currently, only one entry may be provided.
- // +optional
- AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
-}
-
-// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
-// field in a Service. If the zero struct is supplied, then none are permitted.
-// The policy controller always allows automatically assigned external IPs.
-type ExternalIPPolicy struct {
- // allowedCIDRs is the list of allowed CIDRs.
- AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
-
- // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
- // over allowedCIDRs.
- // +optional
- RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type NetworkList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Network `json:"items"`
-}
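
For orientation, the following standalone sketch (not part of this package; example CIDR and hostPrefix values are assumptions) shows the arithmetic that ClusterNetworkEntry implies: the pod CIDR is carved into per-node blocks of size /hostPrefix.

package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumed example: clusterNetwork 10.128.0.0/14 with hostPrefix 23.
	_, cidr, err := net.ParseCIDR("10.128.0.0/14")
	if err != nil {
		panic(err)
	}
	ones, _ := cidr.Mask.Size()
	hostPrefix := 23

	nodeBlocks := 1 << uint(hostPrefix-ones) // 2^(23-14) = 512 per-node subnets
	podIPs := 1<<uint(32-hostPrefix) - 2     // 510 usable pod IPs in each /23
	fmt.Printf("%d node blocks, %d pod IPs each\n", nodeBlocks, podIPs)
}
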
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
deleted file mode 100644
index 15bc5b1c1..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_oauth.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// OAuth Server and Identity Provider Config
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`.
-// It is used to configure the integrated OAuth server.
-// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
-type OAuth struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // +kubebuilder:validation:Required
- // +required
- Spec OAuthSpec `json:"spec"`
- // +optional
- Status OAuthStatus `json:"status"`
-}
-
-// OAuthSpec contains desired cluster auth configuration
-type OAuthSpec struct {
- // identityProviders is an ordered list of ways for a user to identify themselves.
- // When this list is empty, no identities are provisioned for users.
- // +optional
- IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
-
- // tokenConfig contains options for authorization and access tokens
- TokenConfig TokenConfig `json:"tokenConfig"`
-
- // templates allow you to customize pages like the login page.
- // +optional
- Templates OAuthTemplates `json:"templates"`
-}
-
-// OAuthStatus shows current known state of OAuth server in the cluster
-type OAuthStatus struct {
- // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
-}
-
-// TokenConfig holds the necessary configuration options for authorization and access tokens
-type TokenConfig struct {
- // accessTokenMaxAgeSeconds defines the maximum age of access tokens
- AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"`
-
- // accessTokenInactivityTimeoutSeconds defines the default token
- // inactivity timeout for tokens granted by any client.
-	// The value represents the maximum amount of time that can occur between
-	// consecutive uses of the token. Tokens become invalid if they are not
-	// used within this window. The user will need to acquire a new
-	// token to regain access once a token times out.
-	// Valid values are integers:
-	//   x < 0  Token timeout is enabled, but tokens never time out unless configured per client (e.g. `-1`)
-	//   x = 0  Token timeout is disabled (default)
-	//   x > 0  Tokens time out if there is no activity for x seconds
-	// The current minimum allowed value for x is 300 seconds (5 minutes)
- // +optional
- AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
-}
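
A small standalone sketch of the value ranges documented above; the helper is hypothetical and exists only to make the three cases concrete:

package main

import "fmt"

// describeInactivityTimeout mirrors the documented interpretation of
// accessTokenInactivityTimeoutSeconds; illustration only, not package code.
func describeInactivityTimeout(seconds int32) string {
	switch {
	case seconds < 0:
		return "timeout enabled, but tokens never time out unless configured per client"
	case seconds == 0:
		return "timeout disabled (default)"
	default:
		return fmt.Sprintf("tokens time out after %d seconds of inactivity", seconds)
	}
}

func main() {
	for _, v := range []int32{-1, 0, 300} {
		fmt.Printf("%4d: %s\n", v, describeInactivityTimeout(v))
	}
}
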
-
-const (
- // LoginTemplateKey is the key of the login template in a secret
- LoginTemplateKey = "login.html"
-
- // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
- ProviderSelectionTemplateKey = "providers.html"
-
- // ErrorsTemplateKey is the key for the errors template in a secret
- ErrorsTemplateKey = "errors.html"
-
- // BindPasswordKey is the key for the LDAP bind password in a secret
- BindPasswordKey = "bindPassword"
-
- // ClientSecretKey is the key for the oauth client secret data in a secret
- ClientSecretKey = "clientSecret"
-
- // HTPasswdDataKey is the key for the htpasswd file data in a secret
- HTPasswdDataKey = "htpasswd"
-)
-
-// OAuthTemplates allow for customization of pages like the login page
-type OAuthTemplates struct {
- // login is the name of a secret that specifies a go template to use to render the login page.
- // The key "login.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default login page is used.
- // If the specified template is not valid, the default login page is used.
- // If unspecified, the default login page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- Login SecretNameReference `json:"login"`
-
- // providerSelection is the name of a secret that specifies a go template to use to render
- // the provider selection page.
- // The key "providers.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default provider selection page is used.
- // If the specified template is not valid, the default provider selection page is used.
- // If unspecified, the default provider selection page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- ProviderSelection SecretNameReference `json:"providerSelection"`
-
- // error is the name of a secret that specifies a go template to use to render error pages
- // during the authentication or grant flow.
- // The key "errors.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default error page is used.
- // If the specified template is not valid, the default error page is used.
- // If unspecified, the default error page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- Error SecretNameReference `json:"error"`
-}
-
-// IdentityProvider provides identities for users authenticating using credentials
-type IdentityProvider struct {
- // name is used to qualify the identities returned by this provider.
- // - It MUST be unique and not shared by any other identity provider used
- // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":"
- // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName
- Name string `json:"name"`
-
- // mappingMethod determines how identities from this provider are mapped to users
- // Defaults to "claim"
- // +optional
- MappingMethod MappingMethodType `json:"mappingMethod,omitempty"`
-
- IdentityProviderConfig `json:",inline"`
-}
-
-// MappingMethodType specifies how new identities should be mapped to users when they log in
-type MappingMethodType string
-
-const (
- // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user
- // with that user name is already mapped to another identity.
- // Default.
- MappingMethodClaim MappingMethodType = "claim"
-
- // MappingMethodLookup looks up existing users already mapped to an identity but does not
- // automatically provision users or identities. Requires identities and users be set up
- // manually or using an external process.
- MappingMethodLookup MappingMethodType = "lookup"
-
- // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with
- // that user name already exists, the identity is mapped to the existing user, adding to any
- // existing identity mappings for the user.
- MappingMethodAdd MappingMethodType = "add"
-)
-
-type IdentityProviderType string
-
-const (
- // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
- IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
-
- // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
- IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
-
- // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
- IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
-
- // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
- IdentityProviderTypeGoogle IdentityProviderType = "Google"
-
- // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
- IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
-
-	// IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
- IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
-
- // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
- IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
-
- // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
- IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
-
- // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
- IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
-)
-
-// IdentityProviderConfig contains configuration for using a specific identity provider
-type IdentityProviderConfig struct {
- // type identifies the identity provider type for this entry.
- Type IdentityProviderType `json:"type"`
-
- // Provider-specific configuration
- // The json tag MUST match the `Type` specified above, case-insensitively
- // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
-
- // basicAuth contains configuration options for the BasicAuth IdP
- // +optional
- BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
-
- // github enables user authentication using GitHub credentials
- // +optional
- GitHub *GitHubIdentityProvider `json:"github,omitempty"`
-
- // gitlab enables user authentication using GitLab credentials
- // +optional
- GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
-
- // google enables user authentication using Google credentials
- // +optional
- Google *GoogleIdentityProvider `json:"google,omitempty"`
-
- // htpasswd enables user authentication using an HTPasswd file to validate credentials
- // +optional
- HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
-
- // keystone enables user authentication using keystone password credentials
- // +optional
- Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
-
- // ldap enables user authentication using LDAP credentials
- // +optional
- LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
-
- // openID enables user authentication using OpenID credentials
- // +optional
- OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
-
- // requestHeader enables user authentication using request header credentials
- // +optional
- RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
-}
-
-// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
-type BasicAuthIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
- OAuthRemoteConnectionInfo `json:",inline"`
-}
-
-// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
-type OAuthRemoteConnectionInfo struct {
- // url is the remote URL to connect to
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // tlsClientCert is an optional reference to a secret by name that contains the
- // PEM-encoded TLS client certificate to present when connecting to the server.
- // The key "tls.crt" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientCert SecretNameReference `json:"tlsClientCert"`
-
- // tlsClientKey is an optional reference to a secret by name that contains the
- // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
- // The key "tls.key" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientKey SecretNameReference `json:"tlsClientKey"`
-}
-
-// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
-type HTPasswdIdentityProvider struct {
- // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
- // The key "htpasswd" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // If the specified htpasswd data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- FileData SecretNameReference `json:"fileData"`
-}
-
-// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
-type LDAPIdentityProvider struct {
- // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
- // The syntax of the URL is:
- // ldap://host:port/basedn?attribute?scope?filter
- URL string `json:"url"`
-
- // bindDN is an optional DN to bind with during the search phase.
- // +optional
- BindDN string `json:"bindDN"`
-
- // bindPassword is an optional reference to a secret by name
- // containing a password to bind with during the search phase.
- // The key "bindPassword" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- BindPassword SecretNameReference `json:"bindPassword"`
-
- // insecure, if true, indicates the connection should not use TLS
- // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
- // attempt to connect using TLS, even when `insecure` is set to `true`
-	// When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
- // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
- Insecure bool `json:"insecure"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // attributes maps LDAP attributes to identities
- Attributes LDAPAttributeMapping `json:"attributes"`
-}
-
-// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
-type LDAPAttributeMapping struct {
- // id is the list of attributes whose values should be used as the user ID. Required.
-	// The first non-empty attribute is used. At least one attribute is required. If none of the listed
-	// attributes have a value, authentication fails.
- // LDAP standard identity attribute is "dn"
- ID []string `json:"id"`
-
- // preferredUsername is the list of attributes whose values should be used as the preferred username.
- // LDAP standard login attribute is "uid"
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of attributes whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // LDAP standard display name attribute is "cn"
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of attributes whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +optional
- Email []string `json:"email,omitempty"`
-}
-
-// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
-type KeystoneIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
- OAuthRemoteConnectionInfo `json:",inline"`
-
- // domainName is required for keystone v3
- DomainName string `json:"domainName"`
-
- // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
- // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
- // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
- // +optional
- // UseUsernameIdentity bool `json:"useUsernameIdentity"`
-}
-
-// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
-type RequestHeaderIdentityProvider struct {
- // loginURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when login is set to true.
- LoginURL string `json:"loginURL"`
-
- // challengeURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
- // redirected here.
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when challenge is set to true.
- ChallengeURL string `json:"challengeURL"`
-
- // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // Specifically, it allows verification of incoming requests to prevent header spoofing.
- // The key "ca.crt" is used to locate the data.
- // If the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // The namespace for this config map is openshift-config.
- ClientCA ConfigMapNameReference `json:"ca"`
-
- // clientCommonNames is an optional list of common names to require a match from. If empty, any
- // client certificate validated against the clientCA bundle is considered authoritative.
- // +optional
- ClientCommonNames []string `json:"clientCommonNames,omitempty"`
-
- // headers is the set of headers to check for identity information
- Headers []string `json:"headers"`
-
- // preferredUsernameHeaders is the set of headers to check for the preferred username
- PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
-
- // nameHeaders is the set of headers to check for the display name
- NameHeaders []string `json:"nameHeaders"`
-
- // emailHeaders is the set of headers to check for the email address
- EmailHeaders []string `json:"emailHeaders"`
-}
-
-// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
-type GitHubIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // organizations optionally restricts which organizations are allowed to log in
- // +optional
- Organizations []string `json:"organizations,omitempty"`
-
- // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
- // +optional
- Teams []string `json:"teams,omitempty"`
-
- // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
- // GitHub Enterprise.
- // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
- // +optional
- Hostname string `json:"hostname"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // This can only be configured when hostname is set to a non-empty value.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-}
-
-// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
-type GitLabIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // url is the oauth server base URL
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-}
-
-// GoogleIdentityProvider provides identities for users authenticating using Google credentials
-type GoogleIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
- // +optional
- HostedDomain string `json:"hostedDomain"`
-}
-
-// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
-type OpenIDIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // extraScopes are any scopes to request in addition to the standard "openid" scope.
- // +optional
- ExtraScopes []string `json:"extraScopes,omitempty"`
-
- // extraAuthorizeParameters are any custom parameters to add to the authorize request.
- // +optional
- ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"`
-
- // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier.
- // It must use the https scheme with no query or fragment component.
- Issuer string `json:"issuer"`
-
- // claims mappings
- Claims OpenIDClaims `json:"claims"`
-}
-
-// UserIDClaim is the claim used to provide a stable identifier for OIDC identities.
-// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability
-// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can
-// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique
-// and never reassigned within the Issuer for a particular End-User, as described in Section 2.
-// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the
-// iss Claim and the sub Claim."
-const UserIDClaim = "sub"
-
-// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
-type OpenIDClaims struct {
- // preferredUsername is the list of claims whose values should be used as the preferred username.
- // If unspecified, the preferred username is determined from the value of the sub claim
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of claims whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of claims whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +optional
- Email []string `json:"email,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type OAuthList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []OAuth `json:"items"`
-}
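
The IdentityProvider comment above requires the name to be a valid path segment (not "." or "..", and free of "/", "%", and ":"). A minimal standalone sketch of that rule; the real validation lives in openshift/origin, and this helper is illustrative only:

package main

import (
	"fmt"
	"strings"
)

// validIdentityProviderName sketches the path-segment rule quoted above;
// hypothetical helper, not part of this package.
func validIdentityProviderName(name string) bool {
	if name == "" || name == "." || name == ".." {
		return false
	}
	return !strings.ContainsAny(name, "/%:")
}

func main() {
	for _, n := range []string{"my_ldap", "..", "a/b", "github"} {
		fmt.Printf("%-10q valid=%v\n", n, validIdentityProviderName(n))
	}
}
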
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
deleted file mode 100644
index 1d998bf37..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// OperatorHubSpec defines the desired state of OperatorHub
-type OperatorHubSpec struct {
- // disableAllDefaultSources allows you to disable all the default hub
- // sources. If this is true, a specific entry in sources can be used to
- // enable a default source. If this is false, a specific entry in
- // sources can be used to disable or enable a default source.
- // +optional
- DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
- // sources is the list of default hub sources and their configuration.
- // If the list is empty, it implies that the default hub sources are
- // enabled on the cluster unless disableAllDefaultSources is true.
- // If disableAllDefaultSources is true and sources is not empty,
- // the configuration present in sources will take precedence. The list of
- // default hub sources and their current state will always be reflected in
- // the status block.
- // +optional
- Sources []HubSource `json:"sources,omitempty"`
-}
-
-// OperatorHubStatus defines the observed state of OperatorHub. The current
-// state of the default hub sources will always be reflected here.
-type OperatorHubStatus struct {
- // sources encapsulates the result of applying the configuration for each
- // hub source
- Sources []HubSourceStatus `json:"sources,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OperatorHub is the Schema for the operatorhubs API. It can be used to change
-// the state of the default hub sources for OperatorHub on the cluster from
-// enabled to disabled and vice versa.
-// +kubebuilder:subresource:status
-// +genclient:nonNamespaced
-type OperatorHub struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec OperatorHubSpec `json:"spec"`
- Status OperatorHubStatus `json:"status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OperatorHubList contains a list of OperatorHub
-type OperatorHubList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
- Items []OperatorHub `json:"items"`
-}
-
-// HubSource is used to specify the hub source and its configuration
-type HubSource struct {
- // name is the name of one of the default hub sources
- // +kubebuilder:validation:MaxLength=253
- // +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:Required
- Name string `json:"name"`
- // disabled is used to disable a default hub source on cluster
-	// +kubebuilder:validation:Required
- Disabled bool `json:"disabled"`
-}
-
-// HubSourceStatus is used to reflect the current state of applying the
-// configuration to a default source
-type HubSourceStatus struct {
- HubSource `json:",omitempty"`
- // status indicates success or failure in applying the configuration
- Status string `json:"status,omitempty"`
- // message provides more information regarding failures
- Message string `json:"message,omitempty"`
-}
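
A standalone sketch of the precedence documented on OperatorHubSpec above: an explicit per-source entry wins; otherwise disableAllDefaultSources applies. Types are redeclared locally for illustration; this is not the operator's actual reconciliation code.

package main

import "fmt"

type hubSource struct {
	Name     string
	Disabled bool
}

// effectiveDisabled returns whether a named default source ends up disabled,
// per the documented precedence; illustration only.
func effectiveDisabled(disableAll bool, sources []hubSource, name string) bool {
	for _, s := range sources {
		if s.Name == name {
			return s.Disabled // an explicit entry takes precedence
		}
	}
	return disableAll // otherwise the global default applies
}

func main() {
	sources := []hubSource{{Name: "community-operators", Disabled: false}}
	fmt.Println(effectiveDisabled(true, sources, "community-operators")) // false: re-enabled explicitly
	fmt.Println(effectiveDisabled(true, sources, "certified-operators")) // true: global disable applies
}
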
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
deleted file mode 100644
index 244ce3ef8..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_project.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Project holds cluster-wide information about Project. The canonical name is `cluster`
-type Project struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ProjectSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ProjectStatus `json:"status"`
-}
-
-// TemplateReference references a template in a specific namespace.
-// The namespace must be specified at the point of use.
-type TemplateReference struct {
- // name is the metadata.name of the referenced project request template
- Name string `json:"name"`
-}
-
-// ProjectSpec holds the project creation configuration.
-type ProjectSpec struct {
-	// projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest API endpoint
- // +optional
- ProjectRequestMessage string `json:"projectRequestMessage"`
-
- // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
-	// This must point to a template in the 'openshift-config' namespace. It is optional.
- // If it is not specified, a default template is used.
- //
- // +optional
- ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"`
-}
-
-type ProjectStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ProjectList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Project `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
deleted file mode 100644
index 93f4c487e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`
-type Proxy struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	// spec holds user settable values for the proxy configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ProxySpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ProxyStatus `json:"status"`
-}
-
-// ProxySpec contains cluster proxy creation configuration.
-type ProxySpec struct {
- // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
- // +optional
- HTTPProxy string `json:"httpProxy,omitempty"`
-
- // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.
- // +optional
- HTTPSProxy string `json:"httpsProxy,omitempty"`
-
- // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
- // Empty means unset and will not result in an env var.
- // +optional
- NoProxy string `json:"noProxy,omitempty"`
-
- // readinessEndpoints is a list of endpoints used to verify readiness of the proxy.
- // +optional
- ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"`
-
- // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used
- // for client egress HTTPS connections. The certificate bundle must be from the CA
- // that signed the proxy's certificate and be signed for everything. The trustedCA
- // field should only be consumed by a proxy validator. The validator is responsible
- // for reading the certificate bundle from required key "ca-bundle.crt" and copying
- // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed"
- // namespace. The namespace for the ConfigMap referenced by trustedCA is
- // "openshift-config". Here is an example ConfigMap (in yaml):
- //
- // apiVersion: v1
- // kind: ConfigMap
- // metadata:
- // name: user-ca-bundle
- // namespace: openshift-config
- // data:
- // ca-bundle.crt: |
- // -----BEGIN CERTIFICATE-----
- // Custom CA certificate bundle.
- // -----END CERTIFICATE-----
- //
- // +optional
- TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"`
-}
-
-// ProxyStatus shows current known state of the cluster proxy.
-type ProxyStatus struct {
- // httpProxy is the URL of the proxy for HTTP requests.
- // +optional
- HTTPProxy string `json:"httpProxy,omitempty"`
-
- // httpsProxy is the URL of the proxy for HTTPS requests.
- // +optional
- HTTPSProxy string `json:"httpsProxy,omitempty"`
-
- // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
- // +optional
- NoProxy string `json:"noProxy,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ProxyList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Proxy `json:"items"`
-}
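
Since the ProxyStatus comments above note that empty fields mean unset and produce no env var, here is a minimal standalone sketch of how a consumer might translate the status into the conventional environment variables. The struct is redeclared locally; this is not code from this package.

package main

import "fmt"

type proxyStatus struct {
	HTTPProxy  string
	HTTPSProxy string
	NoProxy    string
}

// proxyEnv emits env vars only for fields that are set, matching the
// "empty means unset" semantics documented above; illustration only.
func proxyEnv(s proxyStatus) []string {
	var env []string
	if s.HTTPProxy != "" {
		env = append(env, "HTTP_PROXY="+s.HTTPProxy)
	}
	if s.HTTPSProxy != "" {
		env = append(env, "HTTPS_PROXY="+s.HTTPSProxy)
	}
	if s.NoProxy != "" {
		env = append(env, "NO_PROXY="+s.NoProxy)
	}
	return env
}

func main() {
	fmt.Println(proxyEnv(proxyStatus{
		HTTPProxy: "http://proxy.example.com:3128",
		NoProxy:   ".cluster.local,10.0.0.0/8",
	}))
}
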
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
deleted file mode 100644
index d5bf0c362..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
-// and influence its placement decisions. The canonical name for this config is `cluster`.
-type Scheduler struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec SchedulerSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status SchedulerStatus `json:"status"`
-}
-
-type SchedulerSpec struct {
-	// policy is a reference to a ConfigMap containing a scheduler policy which has
-	// user-specified predicates and priorities. If this ConfigMap is not available,
-	// the scheduler will default to using DefaultAlgorithmProvider.
- // The namespace for this configmap is openshift-config.
- // +optional
- Policy ConfigMapNameReference `json:"policy"`
- // defaultNodeSelector helps set the cluster-wide default node selector to
- // restrict pod placement to specific nodes. This is applied to the pods
- // created in all namespaces without a specified nodeSelector value.
- // For example,
-	// defaultNodeSelector: "type=user-node,region=east" would set the nodeSelector
-	// field in the pod spec to "type=user-node,region=east" for all pods created
-	// in all namespaces. Namespaces having project-wide node selectors won't be
- // impacted even if this field is set. This adds an annotation section to
- // the namespace.
- // For example, if a new namespace is created with
- // node-selector='type=user-node,region=east',
- // the annotation openshift.io/node-selector: type=user-node,region=east
- // gets added to the project. When the openshift.io/node-selector annotation
-	// is set on the project, its value is used in preference to the value set
-	// in the defaultNodeSelector field.
- // For instance,
- // openshift.io/node-selector: "type=user-node,region=west" means
- // that the default of "type=user-node,region=east" set in defaultNodeSelector
- // would not be applied.
- // +optional
- DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
-	// mastersSchedulable allows master nodes to be schedulable. When this flag is
- // turned on, all the master nodes in the cluster will be made schedulable,
- // so that workload pods can run on them. The default value for this field is false,
- // meaning none of the master nodes are schedulable.
- // Important Note: Once the workload pods start running on the master nodes,
- // extreme care must be taken to ensure that cluster-critical control plane components
- // are not impacted.
- // Please turn on this field after doing due diligence.
- // +optional
- MastersSchedulable bool `json:"mastersSchedulable"`
-}
-
-type SchedulerStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type SchedulerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Scheduler `json:"items"`
-}
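
The defaultNodeSelector comment above uses selector strings of the form "type=user-node,region=east". A minimal standalone sketch of how such a string maps to label key/value pairs; real Kubernetes parsing and validation are stricter, so this is illustration only.

package main

import (
	"fmt"
	"strings"
)

// parseNodeSelector splits "k=v,k=v" selector strings into a label map;
// hypothetical helper, not part of this package.
func parseNodeSelector(s string) (map[string]string, error) {
	labels := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		k, v, ok := strings.Cut(pair, "=")
		if !ok {
			return nil, fmt.Errorf("invalid selector term %q", pair)
		}
		labels[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return labels, nil
}

func main() {
	labels, err := parseNodeSelector("type=user-node,region=east")
	if err != nil {
		panic(err)
	}
	fmt.Println(labels) // map[region:east type:user-node]
}
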
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
deleted file mode 100644
index ea788dc16..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package v1
-
-// TLSSecurityProfile defines the schema for a TLS security profile. This object
-// is used by operators to apply TLS security settings to operands.
-// +union
-type TLSSecurityProfile struct {
- // type is one of Old, Intermediate, Modern or Custom. Custom provides
- // the ability to specify individual TLS security profile parameters.
- // Old, Intermediate and Modern are TLS security profiles based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
- //
- // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
- // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
- // reduced.
- //
- // Note that the Modern profile is currently not supported because it is not
- // yet well adopted by common software libraries.
- //
- // +unionDiscriminator
- // +optional
- Type TLSProfileType `json:"type"`
- // old is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES256-GCM-SHA384
- // - ECDHE-RSA-AES256-GCM-SHA384
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - DHE-RSA-AES128-GCM-SHA256
- // - DHE-RSA-AES256-GCM-SHA384
- // - DHE-RSA-CHACHA20-POLY1305
- // - ECDHE-ECDSA-AES128-SHA256
- // - ECDHE-RSA-AES128-SHA256
- // - ECDHE-ECDSA-AES128-SHA
- // - ECDHE-RSA-AES128-SHA
- // - ECDHE-ECDSA-AES256-SHA384
- // - ECDHE-RSA-AES256-SHA384
- // - ECDHE-ECDSA-AES256-SHA
- // - ECDHE-RSA-AES256-SHA
- // - DHE-RSA-AES128-SHA256
- // - DHE-RSA-AES256-SHA256
- // - AES128-GCM-SHA256
- // - AES256-GCM-SHA384
- // - AES128-SHA256
- // - AES256-SHA256
- // - AES128-SHA
- // - AES256-SHA
- // - DES-CBC3-SHA
- // minTLSVersion: TLSv1.0
- //
- // +optional
- // +nullable
- Old *OldTLSProfile `json:"old,omitempty"`
- // intermediate is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES256-GCM-SHA384
- // - ECDHE-RSA-AES256-GCM-SHA384
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - DHE-RSA-AES128-GCM-SHA256
- // - DHE-RSA-AES256-GCM-SHA384
- // minTLSVersion: TLSv1.2
- //
- // +optional
- // +nullable
- Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
- // modern is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // minTLSVersion: TLSv1.3
- //
- // NOTE: Currently unsupported.
- //
- // +optional
- // +nullable
- Modern *ModernTLSProfile `json:"modern,omitempty"`
- // custom is a user-defined TLS security profile. Be extremely careful using a custom
- // profile as invalid configurations can be catastrophic. An example custom profile
- // looks like this:
- //
- // ciphers:
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // minTLSVersion: TLSv1.1
- //
- // +optional
- // +nullable
- Custom *CustomTLSProfile `json:"custom,omitempty"`
-}
-
-// OldTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
-type OldTLSProfile struct{}
-
-// IntermediateTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
-type IntermediateTLSProfile struct{}
-
-// ModernTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
-type ModernTLSProfile struct{}
-
-// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
-// using a custom TLS profile as invalid configurations can be catastrophic.
-type CustomTLSProfile struct {
- TLSProfileSpec `json:",inline"`
-}
-
-// TLSProfileType defines a TLS security profile type.
-type TLSProfileType string
-
-const (
-	// TLSProfileOldType is a TLS security profile based on:
-	// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
-	TLSProfileOldType TLSProfileType = "Old"
-	// TLSProfileIntermediateType is a TLS security profile based on:
-	// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
-	TLSProfileIntermediateType TLSProfileType = "Intermediate"
-	// TLSProfileModernType is a TLS security profile based on:
-	// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
-	TLSProfileModernType TLSProfileType = "Modern"
-	// TLSProfileCustomType is a TLS security profile that allows for user-defined parameters.
-	TLSProfileCustomType TLSProfileType = "Custom"
-)
-
-// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
-type TLSProfileSpec struct {
- // ciphers is used to specify the cipher algorithms that are negotiated
- // during the TLS handshake. Operators may remove entries their operands
- // do not support. For example, to use DES-CBC3-SHA (yaml):
- //
- // ciphers:
- // - DES-CBC3-SHA
- //
- Ciphers []string `json:"ciphers"`
- // minTLSVersion is used to specify the minimal version of the TLS protocol
- // that is negotiated during the TLS handshake. For example, to use TLS
- // versions 1.1, 1.2 and 1.3 (yaml):
- //
- // minTLSVersion: TLSv1.1
- //
- // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
- //
- MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
-}
-
-// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
-// Protocol versions are based on the following most common TLS configurations:
-//
-// https://ssl-config.mozilla.org/
-//
-// Note that SSLv3.0 is not a supported protocol version due to well known
-// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
-type TLSProtocolVersion string
-
-const (
-	// VersionTLS10 is version 1.0 of the TLS security protocol.
-	VersionTLS10 TLSProtocolVersion = "VersionTLS10"
-	// VersionTLS11 is version 1.1 of the TLS security protocol.
-	VersionTLS11 TLSProtocolVersion = "VersionTLS11"
-	// VersionTLS12 is version 1.2 of the TLS security protocol.
-	VersionTLS12 TLSProtocolVersion = "VersionTLS12"
-	// VersionTLS13 is version 1.3 of the TLS security protocol.
-	VersionTLS13 TLSProtocolVersion = "VersionTLS13"
-)
-
-// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
-//
-// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
-// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail;
-// just be sure to whitelist only, and everything will be OK.
-var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
- TLSProfileOldType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- "ECDHE-ECDSA-AES128-GCM-SHA256",
- "ECDHE-RSA-AES128-GCM-SHA256",
- "ECDHE-ECDSA-AES256-GCM-SHA384",
- "ECDHE-RSA-AES256-GCM-SHA384",
- "ECDHE-ECDSA-CHACHA20-POLY1305",
- "ECDHE-RSA-CHACHA20-POLY1305",
- "DHE-RSA-AES128-GCM-SHA256",
- "DHE-RSA-AES256-GCM-SHA384",
- "DHE-RSA-CHACHA20-POLY1305",
- "ECDHE-ECDSA-AES128-SHA256",
- "ECDHE-RSA-AES128-SHA256",
- "ECDHE-ECDSA-AES128-SHA",
- "ECDHE-RSA-AES128-SHA",
- "ECDHE-ECDSA-AES256-SHA384",
- "ECDHE-RSA-AES256-SHA384",
- "ECDHE-ECDSA-AES256-SHA",
- "ECDHE-RSA-AES256-SHA",
- "DHE-RSA-AES128-SHA256",
- "DHE-RSA-AES256-SHA256",
- "AES128-GCM-SHA256",
- "AES256-GCM-SHA384",
- "AES128-SHA256",
- "AES256-SHA256",
- "AES128-SHA",
- "AES256-SHA",
- "DES-CBC3-SHA",
- },
- MinTLSVersion: VersionTLS10,
- },
- TLSProfileIntermediateType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- "ECDHE-ECDSA-AES128-GCM-SHA256",
- "ECDHE-RSA-AES128-GCM-SHA256",
- "ECDHE-ECDSA-AES256-GCM-SHA384",
- "ECDHE-RSA-AES256-GCM-SHA384",
- "ECDHE-ECDSA-CHACHA20-POLY1305",
- "ECDHE-RSA-CHACHA20-POLY1305",
- "DHE-RSA-AES128-GCM-SHA256",
- "DHE-RSA-AES256-GCM-SHA384",
- },
- MinTLSVersion: VersionTLS12,
- },
- TLSProfileModernType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- },
- MinTLSVersion: VersionTLS13,
- },
-}
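
Consumers of these profiles ultimately translate the TLSProtocolVersion string constants into crypto/tls version IDs. A minimal standalone sketch of that mapping; the switch is redeclared locally for illustration and is not code from this package.

package main

import (
	"crypto/tls"
	"fmt"
)

// tlsVersion maps the string constants above onto crypto/tls version IDs;
// hypothetical helper for illustration only.
func tlsVersion(v string) (uint16, error) {
	switch v {
	case "VersionTLS10":
		return tls.VersionTLS10, nil
	case "VersionTLS11":
		return tls.VersionTLS11, nil
	case "VersionTLS12":
		return tls.VersionTLS12, nil
	case "VersionTLS13":
		return tls.VersionTLS13, nil
	}
	return 0, fmt.Errorf("unknown TLS version %q", v)
}

func main() {
	// The Intermediate profile above pins minTLSVersion to VersionTLS12.
	min, err := tlsVersion("VersionTLS12")
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{MinVersion: min}
	fmt.Printf("MinVersion: %#x\n", cfg.MinVersion)
}
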
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
deleted file mode 100644
index 37888a939..000000000
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,3365 +0,0 @@
-// +build !ignore_autogenerated
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1
-
-import (
- corev1 "k8s.io/api/core/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServer) DeepCopyInto(out *APIServer) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
-func (in *APIServer) DeepCopy() *APIServer {
- if in == nil {
- return nil
- }
- out := new(APIServer)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *APIServer) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption.
-func (in *APIServerEncryption) DeepCopy() *APIServerEncryption {
- if in == nil {
- return nil
- }
- out := new(APIServerEncryption)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerList) DeepCopyInto(out *APIServerList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]APIServer, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList.
-func (in *APIServerList) DeepCopy() *APIServerList {
- if in == nil {
- return nil
- }
- out := new(APIServerList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *APIServerList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) {
- *out = *in
- if in.Names != nil {
- in, out := &in.Names, &out.Names
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.ServingCertificate = in.ServingCertificate
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert.
-func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert {
- if in == nil {
- return nil
- }
- out := new(APIServerNamedServingCert)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) {
- *out = *in
- if in.NamedCertificates != nil {
- in, out := &in.NamedCertificates, &out.NamedCertificates
- *out = make([]APIServerNamedServingCert, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts.
-func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts {
- if in == nil {
- return nil
- }
- out := new(APIServerServingCerts)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
- *out = *in
- in.ServingCerts.DeepCopyInto(&out.ServingCerts)
- out.ClientCA = in.ClientCA
- if in.AdditionalCORSAllowedOrigins != nil {
- in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.Encryption = in.Encryption
- if in.TLSSecurityProfile != nil {
- in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
- *out = new(TLSSecurityProfile)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec.
-func (in *APIServerSpec) DeepCopy() *APIServerSpec {
- if in == nil {
- return nil
- }
- out := new(APIServerSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus.
-func (in *APIServerStatus) DeepCopy() *APIServerStatus {
- if in == nil {
- return nil
- }
- out := new(APIServerStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
-func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(AWSPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
- *out = *in
- if in.PluginConfig != nil {
- in, out := &in.PluginConfig, &out.PluginConfig
- *out = make(map[string]AdmissionPluginConfig, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
- if in.EnabledAdmissionPlugins != nil {
- in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.DisabledAdmissionPlugins != nil {
- in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig.
-func (in *AdmissionConfig) DeepCopy() *AdmissionConfig {
- if in == nil {
- return nil
- }
- out := new(AdmissionConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) {
- *out = *in
- in.Configuration.DeepCopyInto(&out.Configuration)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig.
-func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig {
- if in == nil {
- return nil
- }
- out := new(AdmissionPluginConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
- *out = *in
- in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
-func (in *AuditConfig) DeepCopy() *AuditConfig {
- if in == nil {
- return nil
- }
- out := new(AuditConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Authentication) DeepCopyInto(out *Authentication) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
-func (in *Authentication) DeepCopy() *Authentication {
- if in == nil {
- return nil
- }
- out := new(Authentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Authentication) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Authentication, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList.
-func (in *AuthenticationList) DeepCopy() *AuthenticationList {
- if in == nil {
- return nil
- }
- out := new(AuthenticationList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AuthenticationList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
- *out = *in
- out.OAuthMetadata = in.OAuthMetadata
- if in.WebhookTokenAuthenticators != nil {
- in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
- *out = make([]WebhookTokenAuthenticator, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
-func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
- if in == nil {
- return nil
- }
- out := new(AuthenticationSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) {
- *out = *in
- out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus.
-func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
- if in == nil {
- return nil
- }
- out := new(AuthenticationStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus.
-func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus {
- if in == nil {
- return nil
- }
- out := new(AzurePlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus.
-func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(BareMetalPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
- *out = *in
- out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider.
-func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(BasicAuthIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Build) DeepCopyInto(out *Build) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
-func (in *Build) DeepCopy() *Build {
- if in == nil {
- return nil
- }
- out := new(Build)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Build) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) {
- *out = *in
- if in.DefaultProxy != nil {
- in, out := &in.DefaultProxy, &out.DefaultProxy
- *out = new(ProxySpec)
- (*in).DeepCopyInto(*out)
- }
- if in.GitProxy != nil {
- in, out := &in.GitProxy, &out.GitProxy
- *out = new(ProxySpec)
- (*in).DeepCopyInto(*out)
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]corev1.EnvVar, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ImageLabels != nil {
- in, out := &in.ImageLabels, &out.ImageLabels
- *out = make([]ImageLabel, len(*in))
- copy(*out, *in)
- }
- in.Resources.DeepCopyInto(&out.Resources)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults.
-func (in *BuildDefaults) DeepCopy() *BuildDefaults {
- if in == nil {
- return nil
- }
- out := new(BuildDefaults)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildList) DeepCopyInto(out *BuildList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Build, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
-func (in *BuildList) DeepCopy() *BuildList {
- if in == nil {
- return nil
- }
- out := new(BuildList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BuildList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) {
- *out = *in
- if in.ImageLabels != nil {
- in, out := &in.ImageLabels, &out.ImageLabels
- *out = make([]ImageLabel, len(*in))
- copy(*out, *in)
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]corev1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides.
-func (in *BuildOverrides) DeepCopy() *BuildOverrides {
- if in == nil {
- return nil
- }
- out := new(BuildOverrides)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
- *out = *in
- out.AdditionalTrustedCA = in.AdditionalTrustedCA
- in.BuildDefaults.DeepCopyInto(&out.BuildDefaults)
- in.BuildOverrides.DeepCopyInto(&out.BuildOverrides)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
-func (in *BuildSpec) DeepCopy() *BuildSpec {
- if in == nil {
- return nil
- }
- out := new(BuildSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CertInfo) DeepCopyInto(out *CertInfo) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
-func (in *CertInfo) DeepCopy() *CertInfo {
- if in == nil {
- return nil
- }
- out := new(CertInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
-func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
- if in == nil {
- return nil
- }
- out := new(ClientConnectionOverrides)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
-func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
- if in == nil {
- return nil
- }
- out := new(ClusterNetworkEntry)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator.
-func (in *ClusterOperator) DeepCopy() *ClusterOperator {
- if in == nil {
- return nil
- }
- out := new(ClusterOperator)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterOperator) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterOperator, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList.
-func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterOperatorList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec.
-func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]ClusterOperatorStatusCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Versions != nil {
- in, out := &in.Versions, &out.Versions
- *out = make([]OperandVersion, len(*in))
- copy(*out, *in)
- }
- if in.RelatedObjects != nil {
- in, out := &in.RelatedObjects, &out.RelatedObjects
- *out = make([]ObjectReference, len(*in))
- copy(*out, *in)
- }
- in.Extension.DeepCopyInto(&out.Extension)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus.
-func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) {
- *out = *in
- in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition.
-func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorStatusCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
-func (in *ClusterVersion) DeepCopy() *ClusterVersion {
- if in == nil {
- return nil
- }
- out := new(ClusterVersion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterVersion) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterVersion, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList.
-func (in *ClusterVersionList) DeepCopy() *ClusterVersionList {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterVersionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) {
- *out = *in
- if in.DesiredUpdate != nil {
- in, out := &in.DesiredUpdate, &out.DesiredUpdate
- *out = new(Update)
- **out = **in
- }
- if in.Overrides != nil {
- in, out := &in.Overrides, &out.Overrides
- *out = make([]ComponentOverride, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec.
-func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
- *out = *in
- out.Desired = in.Desired
- if in.History != nil {
- in, out := &in.History, &out.History
- *out = make([]UpdateHistory, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]ClusterOperatorStatusCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.AvailableUpdates != nil {
- in, out := &in.AvailableUpdates, &out.AvailableUpdates
- *out = make([]Update, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
-func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride.
-func (in *ComponentOverride) DeepCopy() *ComponentOverride {
- if in == nil {
- return nil
- }
- out := new(ComponentOverride)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference.
-func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference {
- if in == nil {
- return nil
- }
- out := new(ConfigMapFileReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference.
-func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference {
- if in == nil {
- return nil
- }
- out := new(ConfigMapNameReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Console) DeepCopyInto(out *Console) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
-func (in *Console) DeepCopy() *Console {
- if in == nil {
- return nil
- }
- out := new(Console)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Console) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication.
-func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
- if in == nil {
- return nil
- }
- out := new(ConsoleAuthentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Console, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
-func (in *ConsoleList) DeepCopy() *ConsoleList {
- if in == nil {
- return nil
- }
- out := new(ConsoleList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ConsoleList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
- *out = *in
- out.Authentication = in.Authentication
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
-func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
- if in == nil {
- return nil
- }
- out := new(ConsoleSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
-func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
- if in == nil {
- return nil
- }
- out := new(ConsoleStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) {
- *out = *in
- if in.Enabled != nil {
- in, out := &in.Enabled, &out.Enabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Disabled != nil {
- in, out := &in.Disabled, &out.Disabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates.
-func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
- if in == nil {
- return nil
- }
- out := new(CustomFeatureGates)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
- *out = *in
- in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
-func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
- if in == nil {
- return nil
- }
- out := new(CustomTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNS) DeepCopyInto(out *DNS) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
-func (in *DNS) DeepCopy() *DNS {
- if in == nil {
- return nil
- }
- out := new(DNS)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DNS) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSList) DeepCopyInto(out *DNSList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]DNS, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
-func (in *DNSList) DeepCopy() *DNSList {
- if in == nil {
- return nil
- }
- out := new(DNSList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DNSList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
- *out = *in
- if in.PublicZone != nil {
- in, out := &in.PublicZone, &out.PublicZone
- *out = new(DNSZone)
- (*in).DeepCopyInto(*out)
- }
- if in.PrivateZone != nil {
- in, out := &in.PrivateZone, &out.PrivateZone
- *out = new(DNSZone)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
-func (in *DNSSpec) DeepCopy() *DNSSpec {
- if in == nil {
- return nil
- }
- out := new(DNSSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
-func (in *DNSStatus) DeepCopy() *DNSStatus {
- if in == nil {
- return nil
- }
- out := new(DNSStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSZone) DeepCopyInto(out *DNSZone) {
- *out = *in
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone.
-func (in *DNSZone) DeepCopy() *DNSZone {
- if in == nil {
- return nil
- }
- out := new(DNSZone)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
-func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
- if in == nil {
- return nil
- }
- out := new(DelegatedAuthentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
-func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
- if in == nil {
- return nil
- }
- out := new(DelegatedAuthorization)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
- *out = *in
- if in.URLs != nil {
- in, out := &in.URLs, &out.URLs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
-func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(EtcdConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
- *out = *in
- in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
-func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
- if in == nil {
- return nil
- }
- out := new(EtcdStorageConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
- *out = *in
- if in.Policy != nil {
- in, out := &in.Policy, &out.Policy
- *out = new(ExternalIPPolicy)
- (*in).DeepCopyInto(*out)
- }
- if in.AutoAssignCIDRs != nil {
- in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
-func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
- if in == nil {
- return nil
- }
- out := new(ExternalIPConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
- *out = *in
- if in.AllowedCIDRs != nil {
- in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.RejectedCIDRs != nil {
- in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
-func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
- if in == nil {
- return nil
- }
- out := new(ExternalIPPolicy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate.
-func (in *FeatureGate) DeepCopy() *FeatureGate {
- if in == nil {
- return nil
- }
- out := new(FeatureGate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FeatureGate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
- *out = *in
- if in.Enabled != nil {
- in, out := &in.Enabled, &out.Enabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Disabled != nil {
- in, out := &in.Disabled, &out.Disabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
-func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
- if in == nil {
- return nil
- }
- out := new(FeatureGateEnabledDisabled)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]FeatureGate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList.
-func (in *FeatureGateList) DeepCopy() *FeatureGateList {
- if in == nil {
- return nil
- }
- out := new(FeatureGateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FeatureGateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) {
- *out = *in
- if in.CustomNoUpgrade != nil {
- in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade
- *out = new(CustomFeatureGates)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection.
-func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection {
- if in == nil {
- return nil
- }
- out := new(FeatureGateSelection)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) {
- *out = *in
- in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec.
-func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec {
- if in == nil {
- return nil
- }
- out := new(FeatureGateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus.
-func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
- if in == nil {
- return nil
- }
- out := new(FeatureGateStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
-func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(GCPPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- if in.CORSAllowedOrigins != nil {
- in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- in.AuditConfig.DeepCopyInto(&out.AuditConfig)
- in.StorageConfig.DeepCopyInto(&out.StorageConfig)
- in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
- out.KubeClientConfig = in.KubeClientConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig.
-func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig {
- if in == nil {
- return nil
- }
- out := new(GenericAPIServerConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- out.LeaderElection = in.LeaderElection
- out.Authentication = in.Authentication
- out.Authorization = in.Authorization
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig.
-func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig {
- if in == nil {
- return nil
- }
- out := new(GenericControllerConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- if in.Organizations != nil {
- in, out := &in.Organizations, &out.Organizations
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Teams != nil {
- in, out := &in.Teams, &out.Teams
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CA = in.CA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
-func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GitHubIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- out.CA = in.CA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
-func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GitLabIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
-func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GoogleIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) {
- *out = *in
- out.FileData = in.FileData
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider.
-func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(HTPasswdIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
-func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
- if in == nil {
- return nil
- }
- out := new(HTTPServingInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HubSource) DeepCopyInto(out *HubSource) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource.
-func (in *HubSource) DeepCopy() *HubSource {
- if in == nil {
- return nil
- }
- out := new(HubSource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) {
- *out = *in
- out.HubSource = in.HubSource
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus.
-func (in *HubSourceStatus) DeepCopy() *HubSourceStatus {
- if in == nil {
- return nil
- }
- out := new(HubSourceStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
- *out = *in
- in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
-func (in *IdentityProvider) DeepCopy() *IdentityProvider {
- if in == nil {
- return nil
- }
- out := new(IdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
- *out = *in
- if in.BasicAuth != nil {
- in, out := &in.BasicAuth, &out.BasicAuth
- *out = new(BasicAuthIdentityProvider)
- **out = **in
- }
- if in.GitHub != nil {
- in, out := &in.GitHub, &out.GitHub
- *out = new(GitHubIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.GitLab != nil {
- in, out := &in.GitLab, &out.GitLab
- *out = new(GitLabIdentityProvider)
- **out = **in
- }
- if in.Google != nil {
- in, out := &in.Google, &out.Google
- *out = new(GoogleIdentityProvider)
- **out = **in
- }
- if in.HTPasswd != nil {
- in, out := &in.HTPasswd, &out.HTPasswd
- *out = new(HTPasswdIdentityProvider)
- **out = **in
- }
- if in.Keystone != nil {
- in, out := &in.Keystone, &out.Keystone
- *out = new(KeystoneIdentityProvider)
- **out = **in
- }
- if in.LDAP != nil {
- in, out := &in.LDAP, &out.LDAP
- *out = new(LDAPIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.OpenID != nil {
- in, out := &in.OpenID, &out.OpenID
- *out = new(OpenIDIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.RequestHeader != nil {
- in, out := &in.RequestHeader, &out.RequestHeader
- *out = new(RequestHeaderIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
-func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
- if in == nil {
- return nil
- }
- out := new(IdentityProviderConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Image) DeepCopyInto(out *Image) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
-func (in *Image) DeepCopy() *Image {
- if in == nil {
- return nil
- }
- out := new(Image)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Image) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
-func (in *ImageLabel) DeepCopy() *ImageLabel {
- if in == nil {
- return nil
- }
- out := new(ImageLabel)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageList) DeepCopyInto(out *ImageList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Image, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
-func (in *ImageList) DeepCopy() *ImageList {
- if in == nil {
- return nil
- }
- out := new(ImageList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ImageList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
- *out = *in
- if in.AllowedRegistriesForImport != nil {
- in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
- *out = make([]RegistryLocation, len(*in))
- copy(*out, *in)
- }
- if in.ExternalRegistryHostnames != nil {
- in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.AdditionalTrustedCA = in.AdditionalTrustedCA
- in.RegistrySources.DeepCopyInto(&out.RegistrySources)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
-func (in *ImageSpec) DeepCopy() *ImageSpec {
- if in == nil {
- return nil
- }
- out := new(ImageSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
- *out = *in
- if in.ExternalRegistryHostnames != nil {
- in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
-func (in *ImageStatus) DeepCopy() *ImageStatus {
- if in == nil {
- return nil
- }
- out := new(ImageStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
-func (in *Infrastructure) DeepCopy() *Infrastructure {
- if in == nil {
- return nil
- }
- out := new(Infrastructure)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Infrastructure) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Infrastructure, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList.
-func (in *InfrastructureList) DeepCopy() *InfrastructureList {
- if in == nil {
- return nil
- }
- out := new(InfrastructureList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *InfrastructureList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
- *out = *in
- out.CloudConfig = in.CloudConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
-func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
- if in == nil {
- return nil
- }
- out := new(InfrastructureSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
- *out = *in
- if in.PlatformStatus != nil {
- in, out := &in.PlatformStatus, &out.PlatformStatus
- *out = new(PlatformStatus)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus.
-func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus {
- if in == nil {
- return nil
- }
- out := new(InfrastructureStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Ingress) DeepCopyInto(out *Ingress) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
-func (in *Ingress) DeepCopy() *Ingress {
- if in == nil {
- return nil
- }
- out := new(Ingress)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Ingress) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressList) DeepCopyInto(out *IngressList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Ingress, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
-func (in *IngressList) DeepCopy() *IngressList {
- if in == nil {
- return nil
- }
- out := new(IngressList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IngressList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
-func (in *IngressSpec) DeepCopy() *IngressSpec {
- if in == nil {
- return nil
- }
- out := new(IngressSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
-func (in *IngressStatus) DeepCopy() *IngressStatus {
- if in == nil {
- return nil
- }
- out := new(IngressStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile.
-func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile {
- if in == nil {
- return nil
- }
- out := new(IntermediateTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
- *out = *in
- out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider.
-func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(KeystoneIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) {
- *out = *in
- out.ConnectionOverrides = in.ConnectionOverrides
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig.
-func (in *KubeClientConfig) DeepCopy() *KubeClientConfig {
- if in == nil {
- return nil
- }
- out := new(KubeClientConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
- *out = *in
- if in.ID != nil {
- in, out := &in.ID, &out.ID
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.PreferredUsername != nil {
- in, out := &in.PreferredUsername, &out.PreferredUsername
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Name != nil {
- in, out := &in.Name, &out.Name
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Email != nil {
- in, out := &in.Email, &out.Email
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
-func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
- if in == nil {
- return nil
- }
- out := new(LDAPAttributeMapping)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) {
- *out = *in
- out.BindPassword = in.BindPassword
- out.CA = in.CA
- in.Attributes.DeepCopyInto(&out.Attributes)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider.
-func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(LDAPIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LeaderElection) DeepCopyInto(out *LeaderElection) {
- *out = *in
- out.LeaseDuration = in.LeaseDuration
- out.RenewDeadline = in.RenewDeadline
- out.RetryPeriod = in.RetryPeriod
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection.
-func (in *LeaderElection) DeepCopy() *LeaderElection {
- if in == nil {
- return nil
- }
- out := new(LeaderElection)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
-func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
- if in == nil {
- return nil
- }
- out := new(ModernTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
- *out = *in
- if in.Names != nil {
- in, out := &in.Names, &out.Names
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
-func (in *NamedCertificate) DeepCopy() *NamedCertificate {
- if in == nil {
- return nil
- }
- out := new(NamedCertificate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Network) DeepCopyInto(out *Network) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
-func (in *Network) DeepCopy() *Network {
- if in == nil {
- return nil
- }
- out := new(Network)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Network) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkList) DeepCopyInto(out *NetworkList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Network, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
-func (in *NetworkList) DeepCopy() *NetworkList {
- if in == nil {
- return nil
- }
- out := new(NetworkList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
- *out = *in
- if in.ClusterNetwork != nil {
- in, out := &in.ClusterNetwork, &out.ClusterNetwork
- *out = make([]ClusterNetworkEntry, len(*in))
- copy(*out, *in)
- }
- if in.ServiceNetwork != nil {
- in, out := &in.ServiceNetwork, &out.ServiceNetwork
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ExternalIP != nil {
- in, out := &in.ExternalIP, &out.ExternalIP
- *out = new(ExternalIPConfig)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
-func (in *NetworkSpec) DeepCopy() *NetworkSpec {
- if in == nil {
- return nil
- }
- out := new(NetworkSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
- *out = *in
- if in.ClusterNetwork != nil {
- in, out := &in.ClusterNetwork, &out.ClusterNetwork
- *out = make([]ClusterNetworkEntry, len(*in))
- copy(*out, *in)
- }
- if in.ServiceNetwork != nil {
- in, out := &in.ServiceNetwork, &out.ServiceNetwork
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
-func (in *NetworkStatus) DeepCopy() *NetworkStatus {
- if in == nil {
- return nil
- }
- out := new(NetworkStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuth) DeepCopyInto(out *OAuth) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth.
-func (in *OAuth) DeepCopy() *OAuth {
- if in == nil {
- return nil
- }
- out := new(OAuth)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuth) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthList) DeepCopyInto(out *OAuthList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OAuth, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList.
-func (in *OAuthList) DeepCopy() *OAuthList {
- if in == nil {
- return nil
- }
- out := new(OAuthList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuthList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) {
- *out = *in
- out.CA = in.CA
- out.TLSClientCert = in.TLSClientCert
- out.TLSClientKey = in.TLSClientKey
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo.
-func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(OAuthRemoteConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) {
- *out = *in
- if in.IdentityProviders != nil {
- in, out := &in.IdentityProviders, &out.IdentityProviders
- *out = make([]IdentityProvider, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- out.TokenConfig = in.TokenConfig
- out.Templates = in.Templates
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec.
-func (in *OAuthSpec) DeepCopy() *OAuthSpec {
- if in == nil {
- return nil
- }
- out := new(OAuthSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus.
-func (in *OAuthStatus) DeepCopy() *OAuthStatus {
- if in == nil {
- return nil
- }
- out := new(OAuthStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
- *out = *in
- out.Login = in.Login
- out.ProviderSelection = in.ProviderSelection
- out.Error = in.Error
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
-func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
- if in == nil {
- return nil
- }
- out := new(OAuthTemplates)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
-func (in *ObjectReference) DeepCopy() *ObjectReference {
- if in == nil {
- return nil
- }
- out := new(ObjectReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
-func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
- if in == nil {
- return nil
- }
- out := new(OldTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
- *out = *in
- if in.PreferredUsername != nil {
- in, out := &in.PreferredUsername, &out.PreferredUsername
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Name != nil {
- in, out := &in.Name, &out.Name
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Email != nil {
- in, out := &in.Email, &out.Email
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
-func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
- if in == nil {
- return nil
- }
- out := new(OpenIDClaims)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- out.CA = in.CA
- if in.ExtraScopes != nil {
- in, out := &in.ExtraScopes, &out.ExtraScopes
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ExtraAuthorizeParameters != nil {
- in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- in.Claims.DeepCopyInto(&out.Claims)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
-func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(OpenIDIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus.
-func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(OpenStackPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperandVersion) DeepCopyInto(out *OperandVersion) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion.
-func (in *OperandVersion) DeepCopy() *OperandVersion {
- if in == nil {
- return nil
- }
- out := new(OperandVersion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHub) DeepCopyInto(out *OperatorHub) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub.
-func (in *OperatorHub) DeepCopy() *OperatorHub {
- if in == nil {
- return nil
- }
- out := new(OperatorHub)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHub) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OperatorHub, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList.
-func (in *OperatorHubList) DeepCopy() *OperatorHubList {
- if in == nil {
- return nil
- }
- out := new(OperatorHubList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHubList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) {
- *out = *in
- if in.Sources != nil {
- in, out := &in.Sources, &out.Sources
- *out = make([]HubSource, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec.
-func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec {
- if in == nil {
- return nil
- }
- out := new(OperatorHubSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) {
- *out = *in
- if in.Sources != nil {
- in, out := &in.Sources, &out.Sources
- *out = make([]HubSourceStatus, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus.
-func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
- if in == nil {
- return nil
- }
- out := new(OperatorHubStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
-func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(OvirtPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
- *out = *in
- if in.AWS != nil {
- in, out := &in.AWS, &out.AWS
- *out = new(AWSPlatformStatus)
- **out = **in
- }
- if in.Azure != nil {
- in, out := &in.Azure, &out.Azure
- *out = new(AzurePlatformStatus)
- **out = **in
- }
- if in.GCP != nil {
- in, out := &in.GCP, &out.GCP
- *out = new(GCPPlatformStatus)
- **out = **in
- }
- if in.BareMetal != nil {
- in, out := &in.BareMetal, &out.BareMetal
- *out = new(BareMetalPlatformStatus)
- **out = **in
- }
- if in.OpenStack != nil {
- in, out := &in.OpenStack, &out.OpenStack
- *out = new(OpenStackPlatformStatus)
- **out = **in
- }
- if in.Ovirt != nil {
- in, out := &in.Ovirt, &out.Ovirt
- *out = new(OvirtPlatformStatus)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
-func (in *PlatformStatus) DeepCopy() *PlatformStatus {
- if in == nil {
- return nil
- }
- out := new(PlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Project) DeepCopyInto(out *Project) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
-func (in *Project) DeepCopy() *Project {
- if in == nil {
- return nil
- }
- out := new(Project)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Project) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectList) DeepCopyInto(out *ProjectList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Project, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
-func (in *ProjectList) DeepCopy() *ProjectList {
- if in == nil {
- return nil
- }
- out := new(ProjectList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProjectList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
- *out = *in
- out.ProjectRequestTemplate = in.ProjectRequestTemplate
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
-func (in *ProjectSpec) DeepCopy() *ProjectSpec {
- if in == nil {
- return nil
- }
- out := new(ProjectSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
-func (in *ProjectStatus) DeepCopy() *ProjectStatus {
- if in == nil {
- return nil
- }
- out := new(ProjectStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Proxy) DeepCopyInto(out *Proxy) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy.
-func (in *Proxy) DeepCopy() *Proxy {
- if in == nil {
- return nil
- }
- out := new(Proxy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Proxy) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyList) DeepCopyInto(out *ProxyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Proxy, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList.
-func (in *ProxyList) DeepCopy() *ProxyList {
- if in == nil {
- return nil
- }
- out := new(ProxyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProxyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
- *out = *in
- if in.ReadinessEndpoints != nil {
- in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.TrustedCA = in.TrustedCA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
-func (in *ProxySpec) DeepCopy() *ProxySpec {
- if in == nil {
- return nil
- }
- out := new(ProxySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
-func (in *ProxyStatus) DeepCopy() *ProxyStatus {
- if in == nil {
- return nil
- }
- out := new(ProxyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
-func (in *RegistryLocation) DeepCopy() *RegistryLocation {
- if in == nil {
- return nil
- }
- out := new(RegistryLocation)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistrySources) DeepCopyInto(out *RegistrySources) {
- *out = *in
- if in.InsecureRegistries != nil {
- in, out := &in.InsecureRegistries, &out.InsecureRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.BlockedRegistries != nil {
- in, out := &in.BlockedRegistries, &out.BlockedRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.AllowedRegistries != nil {
- in, out := &in.AllowedRegistries, &out.AllowedRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources.
-func (in *RegistrySources) DeepCopy() *RegistrySources {
- if in == nil {
- return nil
- }
- out := new(RegistrySources)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
- *out = *in
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
-func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(RemoteConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
- *out = *in
- out.ClientCA = in.ClientCA
- if in.ClientCommonNames != nil {
- in, out := &in.ClientCommonNames, &out.ClientCommonNames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Headers != nil {
- in, out := &in.Headers, &out.Headers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.PreferredUsernameHeaders != nil {
- in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.NameHeaders != nil {
- in, out := &in.NameHeaders, &out.NameHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.EmailHeaders != nil {
- in, out := &in.EmailHeaders, &out.EmailHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
-func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(RequestHeaderIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Scheduler) DeepCopyInto(out *Scheduler) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler.
-func (in *Scheduler) DeepCopy() *Scheduler {
- if in == nil {
- return nil
- }
- out := new(Scheduler)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Scheduler) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Scheduler, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList.
-func (in *SchedulerList) DeepCopy() *SchedulerList {
- if in == nil {
- return nil
- }
- out := new(SchedulerList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SchedulerList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
- *out = *in
- out.Policy = in.Policy
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
-func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
- if in == nil {
- return nil
- }
- out := new(SchedulerSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
-func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
- if in == nil {
- return nil
- }
- out := new(SchedulerStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference.
-func (in *SecretNameReference) DeepCopy() *SecretNameReference {
- if in == nil {
- return nil
- }
- out := new(SecretNameReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
- *out = *in
- out.CertInfo = in.CertInfo
- if in.NamedCertificates != nil {
- in, out := &in.NamedCertificates, &out.NamedCertificates
- *out = make([]NamedCertificate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.CipherSuites != nil {
- in, out := &in.CipherSuites, &out.CipherSuites
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
-func (in *ServingInfo) DeepCopy() *ServingInfo {
- if in == nil {
- return nil
- }
- out := new(ServingInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StringSource) DeepCopyInto(out *StringSource) {
- *out = *in
- out.StringSourceSpec = in.StringSourceSpec
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
-func (in *StringSource) DeepCopy() *StringSource {
- if in == nil {
- return nil
- }
- out := new(StringSource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
-func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
- if in == nil {
- return nil
- }
- out := new(StringSourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
- *out = *in
- if in.Ciphers != nil {
- in, out := &in.Ciphers, &out.Ciphers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
-func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
- if in == nil {
- return nil
- }
- out := new(TLSProfileSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
- *out = *in
- if in.Old != nil {
- in, out := &in.Old, &out.Old
- *out = new(OldTLSProfile)
- **out = **in
- }
- if in.Intermediate != nil {
- in, out := &in.Intermediate, &out.Intermediate
- *out = new(IntermediateTLSProfile)
- **out = **in
- }
- if in.Modern != nil {
- in, out := &in.Modern, &out.Modern
- *out = new(ModernTLSProfile)
- **out = **in
- }
- if in.Custom != nil {
- in, out := &in.Custom, &out.Custom
- *out = new(CustomTLSProfile)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
-func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
- if in == nil {
- return nil
- }
- out := new(TLSSecurityProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference.
-func (in *TemplateReference) DeepCopy() *TemplateReference {
- if in == nil {
- return nil
- }
- out := new(TemplateReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
-func (in *TokenConfig) DeepCopy() *TokenConfig {
- if in == nil {
- return nil
- }
- out := new(TokenConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Update) DeepCopyInto(out *Update) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
-func (in *Update) DeepCopy() *Update {
- if in == nil {
- return nil
- }
- out := new(Update)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
- *out = *in
- in.StartedTime.DeepCopyInto(&out.StartedTime)
- if in.CompletionTime != nil {
- in, out := &in.CompletionTime, &out.CompletionTime
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
-func (in *UpdateHistory) DeepCopy() *UpdateHistory {
- if in == nil {
- return nil
- }
- out := new(UpdateHistory)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
- *out = *in
- out.KubeConfig = in.KubeConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
-func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
- if in == nil {
- return nil
- }
- out := new(WebhookTokenAuthenticator)
- in.DeepCopyInto(out)
- return out
-}
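
The hunk above removes output of Kubernetes' deepcopy-gen, as the "autogenerated deepcopy function" comments indicate. The pattern it emits reduces to the following minimal, hand-written sketch: value fields are covered by the initial *out = *in assignment, while slice, map, and pointer fields are re-allocated so the copy shares no memory with the receiver. The Widget type and its fields are invented for illustration and do not appear in the vendored sources.

package main

import "fmt"

// Widget is a hypothetical type standing in for the API structs above.
type Widget struct {
	Name string   // value field: copied by the *out = *in assignment
	Tags []string // slice field: needs a fresh backing array
}

// DeepCopyInto mirrors the generated pattern: copy all value fields at
// once, then re-allocate each reference-typed field so the destination
// shares no memory with the receiver.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy allocates the destination and delegates to DeepCopyInto,
// exactly as the generated functions do.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	a := &Widget{Name: "a", Tags: []string{"x"}}
	b := a.DeepCopy()
	b.Tags[0] = "y"                   // mutating the copy...
	fmt.Println(a.Tags[0], b.Tags[0]) // ...leaves the original intact: x y
}
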
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
deleted file mode 100644
index 2d6b19d2d..000000000
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ /dev/null
@@ -1,1292 +0,0 @@
-package v1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-swagger-docs.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE
-var map_AdmissionConfig = map[string]string{
- "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
- "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
-}
-
-func (AdmissionConfig) SwaggerDoc() map[string]string {
- return map_AdmissionConfig
-}
-
-var map_AdmissionPluginConfig = map[string]string{
- "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
- "location": "Location is the path to a configuration file that contains the plugin's configuration",
- "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
-}
-
-func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
- return map_AdmissionPluginConfig
-}
-
-var map_AuditConfig = map[string]string{
- "": "AuditConfig holds configuration for the audit capabilities",
- "enabled": "If this flag is set, the audit log will be printed in the logs. Each entry contains the method, user, and requested URL.",
- "auditFilePath": "All requests coming to the apiserver will be logged to this file.",
- "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
- "maximumRetainedFiles": "Maximum number of old log files to retain.",
- "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
- "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
- "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
- "logFormat": "Format of saved audits (legacy or json).",
- "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
- "webHookMode": "Strategy for sending audit events (block or batch).",
-}
-
-func (AuditConfig) SwaggerDoc() map[string]string {
- return map_AuditConfig
-}
-
-var map_CertInfo = map[string]string{
- "": "CertInfo relates a certificate with a private key",
- "certFile": "CertFile is a file containing a PEM-encoded certificate",
- "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
-}
-
-func (CertInfo) SwaggerDoc() map[string]string {
- return map_CertInfo
-}
-
-var map_ClientConnectionOverrides = map[string]string{
- "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
- "contentType": "contentType is the content type used when sending data to the server from this client.",
- "qps": "qps controls the number of queries per second allowed for this connection.",
- "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.",
-}
-
-func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
- return map_ClientConnectionOverrides
-}
-
-var map_ConfigMapFileReference = map[string]string{
- "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
- "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
-}
-
-func (ConfigMapFileReference) SwaggerDoc() map[string]string {
- return map_ConfigMapFileReference
-}
-
-var map_ConfigMapNameReference = map[string]string{
- "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced config map",
-}
-
-func (ConfigMapNameReference) SwaggerDoc() map[string]string {
- return map_ConfigMapNameReference
-}
-
-var map_DelegatedAuthentication = map[string]string{
- "": "DelegatedAuthentication allows authentication to be disabled.",
- "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
-}
-
-func (DelegatedAuthentication) SwaggerDoc() map[string]string {
- return map_DelegatedAuthentication
-}
-
-var map_DelegatedAuthorization = map[string]string{
- "": "DelegatedAuthorization allows authorization to be disabled.",
- "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
-}
-
-func (DelegatedAuthorization) SwaggerDoc() map[string]string {
- return map_DelegatedAuthorization
-}
-
-var map_EtcdConnectionInfo = map[string]string{
- "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
- "urls": "URLs are the URLs for etcd",
- "ca": "CA is a file containing trusted roots for the etcd server certificates",
-}
-
-func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
- return map_EtcdConnectionInfo
-}
-
-var map_EtcdStorageConfig = map[string]string{
- "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. If this value is changed, existing objects in etcd will no longer be located.",
-}
-
-func (EtcdStorageConfig) SwaggerDoc() map[string]string {
- return map_EtcdStorageConfig
-}
-
-var map_GenericAPIServerConfig = map[string]string{
- "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd",
- "servingInfo": "servingInfo describes how to start serving",
- "corsAllowedOrigins": "corsAllowedOrigins",
- "auditConfig": "auditConfig describes how to configure audit information",
- "storageConfig": "storageConfig contains information about how to use etcd",
- "admission": "admissionConfig holds information about how to configure admission.",
-}
-
-func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
- return map_GenericAPIServerConfig
-}
-
-var map_GenericControllerConfig = map[string]string{
- "": "GenericControllerConfig provides information to configure a controller",
- "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
- "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
- "authentication": "authentication allows configuration of authentication for the endpoints",
- "authorization": "authorization allows configuration of authorization for the endpoints",
-}
-
-func (GenericControllerConfig) SwaggerDoc() map[string]string {
- return map_GenericControllerConfig
-}
-
-var map_HTTPServingInfo = map[string]string{
- "": "HTTPServingInfo holds configuration for serving HTTP",
- "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
- "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
-}
-
-func (HTTPServingInfo) SwaggerDoc() map[string]string {
- return map_HTTPServingInfo
-}
-
-var map_KubeClientConfig = map[string]string{
- "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config",
- "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.",
-}
-
-func (KubeClientConfig) SwaggerDoc() map[string]string {
- return map_KubeClientConfig
-}
-
-var map_LeaderElection = map[string]string{
- "": "LeaderElection provides information to elect a leader",
- "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.",
- "namespace": "namespace indicates which namespace the resource is in",
- "name": "name indicates what name to use for the resource",
- "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
- "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.",
- "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.",
-}
-
-func (LeaderElection) SwaggerDoc() map[string]string {
- return map_LeaderElection
-}
-
-var map_NamedCertificate = map[string]string{
- "": "NamedCertificate specifies a certificate/key, and the names it should be served for",
- "names": "Names is a list of DNS names this certificate should be used to secure. A name can be a normal DNS name, or can contain leading wildcard segments.",
-}
-
-func (NamedCertificate) SwaggerDoc() map[string]string {
- return map_NamedCertificate
-}
-
-var map_RemoteConnectionInfo = map[string]string{
- "": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
- "url": "URL is the remote URL to connect to",
- "ca": "CA is the CA for verifying TLS connections",
-}
-
-func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
- return map_RemoteConnectionInfo
-}
-
-var map_SecretNameReference = map[string]string{
- "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced secret",
-}
-
-func (SecretNameReference) SwaggerDoc() map[string]string {
- return map_SecretNameReference
-}
-
-var map_ServingInfo = map[string]string{
- "": "ServingInfo holds information about serving web pages",
- "bindAddress": "BindAddress is the ip:port to serve on",
- "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
- "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
- "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
- "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
- "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
-}
-
-func (ServingInfo) SwaggerDoc() map[string]string {
- return map_ServingInfo
-}
-
-var map_StringSource = map[string]string{
- "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
-}
-
-func (StringSource) SwaggerDoc() map[string]string {
- return map_StringSource
-}
-
-var map_StringSourceSpec = map[string]string{
- "": "StringSourceSpec specifies a string value, or external location",
- "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
- "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
- "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
- "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
-}
-
-func (StringSourceSpec) SwaggerDoc() map[string]string {
- return map_StringSourceSpec
-}
-
-var map_APIServer = map[string]string{
- "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.",
-}
-
-func (APIServer) SwaggerDoc() map[string]string {
- return map_APIServer
-}
-
-var map_APIServerEncryption = map[string]string{
- "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
-}
-
-func (APIServerEncryption) SwaggerDoc() map[string]string {
- return map_APIServerEncryption
-}
-
-var map_APIServerNamedServingCert = map[string]string{
- "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
- "names": "names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names take precedence over wildcard names, and explicit names defined here take precedence over extracted implicit names.",
- "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.",
-}
-
-func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
- return map_APIServerNamedServingCert
-}
-
-var map_APIServerServingCerts = map[string]string{
- "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
-}
-
-func (APIServerServingCerts) SwaggerDoc() map[string]string {
- return map_APIServerServingCerts
-}
-
-var map_APIServerSpec = map[string]string{
- "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
- "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
- "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
- "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
- "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.",
-}
-
-func (APIServerSpec) SwaggerDoc() map[string]string {
- return map_APIServerSpec
-}
-
-var map_Authentication = map[string]string{
- "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Authentication) SwaggerDoc() map[string]string {
- return map_Authentication
-}
-
-var map_AuthenticationSpec = map[string]string{
- "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
- "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
- "webhookTokenAuthenticators": "webhookTokenAuthenticators configures remote token reviewers. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. The namespace for these secrets is openshift-config.",
-}
-
-func (AuthenticationSpec) SwaggerDoc() map[string]string {
- return map_AuthenticationSpec
-}
-
-var map_AuthenticationStatus = map[string]string{
- "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
-}
-
-func (AuthenticationStatus) SwaggerDoc() map[string]string {
- return map_AuthenticationStatus
-}
-
-var map_WebhookTokenAuthenticator = map[string]string{
- "": "WebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
- "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.",
-}
-
-func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
- return map_WebhookTokenAuthenticator
-}
-
-var map_Build = map[string]string{
- "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"",
- "spec": "Spec holds user-settable values for the build controller configuration",
-}
-
-func (Build) SwaggerDoc() map[string]string {
- return map_Build
-}
-
-var map_BuildDefaults = map[string]string{
- "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
- "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
- "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
- "resources": "Resources defines resource requirements to execute the build.",
-}
-
-func (BuildDefaults) SwaggerDoc() map[string]string {
- return map_BuildDefaults
-}
-
-var map_BuildOverrides = map[string]string{
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If a user provides a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
- "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
- "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
-}
-
-func (BuildOverrides) SwaggerDoc() map[string]string {
- return map_BuildOverrides
-}
-
-var map_BuildSpec = map[string]string{
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
- "buildDefaults": "BuildDefaults controls the default information for Builds",
- "buildOverrides": "BuildOverrides controls override settings for builds",
-}
-
-func (BuildSpec) SwaggerDoc() map[string]string {
- return map_BuildSpec
-}
-
-var map_ImageLabel = map[string]string{
- "name": "Name defines the name of the label. It must have non-zero length.",
- "value": "Value defines the literal value of the label.",
-}
-
-func (ImageLabel) SwaggerDoc() map[string]string {
- return map_ImageLabel
-}
-
-var map_ClusterOperator = map[string]string{
- "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.",
- "spec": "spec holds configuration that could apply to any operator.",
- "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
-}
-
-func (ClusterOperator) SwaggerDoc() map[string]string {
- return map_ClusterOperator
-}
-
-var map_ClusterOperatorList = map[string]string{
- "": "ClusterOperatorList is a list of OperatorStatus resources.",
-}
-
-func (ClusterOperatorList) SwaggerDoc() map[string]string {
- return map_ClusterOperatorList
-}
-
-var map_ClusterOperatorSpec = map[string]string{
- "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
-}
-
-func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
- return map_ClusterOperatorSpec
-}
-
-var map_ClusterOperatorStatus = map[string]string{
- "": "ClusterOperatorStatus provides information about the status of the operator.",
- "conditions": "conditions describes the state of the operator's managed and monitored components.",
- "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
- "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
- "extension": "extension contains any additional status information specific to the operator which owns this status object.",
-}
-
-func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
- return map_ClusterOperatorStatus
-}
-
-var map_ClusterOperatorStatusCondition = map[string]string{
- "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
- "type": "type specifies the aspect reported by this condition.",
- "status": "status of the condition, one of True, False, Unknown.",
- "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
- "reason": "reason is the CamelCase reason for the condition's current status.",
- "message": "message provides additional information about the current condition. This is only to be consumed by humans.",
-}
-
-func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
- return map_ClusterOperatorStatusCondition
-}
-
-var map_ObjectReference = map[string]string{
- "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
- "group": "group of the referent.",
- "resource": "resource of the referent.",
- "namespace": "namespace of the referent.",
- "name": "name of the referent.",
-}
-
-func (ObjectReference) SwaggerDoc() map[string]string {
- return map_ObjectReference
-}
-
-var map_OperandVersion = map[string]string{
- "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
- "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0",
-}
-
-func (OperandVersion) SwaggerDoc() map[string]string {
- return map_OperandVersion
-}
-
-var map_ClusterVersion = map[string]string{
- "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.",
- "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
- "status": "status contains information about the available updates and any in-progress updates.",
-}
-
-func (ClusterVersion) SwaggerDoc() map[string]string {
- return map_ClusterVersion
-}
-
-var map_ClusterVersionList = map[string]string{
- "": "ClusterVersionList is a list of ClusterVersion resources.",
-}
-
-func (ClusterVersionList) SwaggerDoc() map[string]string {
- return map_ClusterVersionList
-}
-
-var map_ClusterVersionSpec = map[string]string{
- "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
- "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
- "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
- "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
- "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.",
- "overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
-}
-
-func (ClusterVersionSpec) SwaggerDoc() map[string]string {
- return map_ClusterVersionSpec
-}
-
-var map_ClusterVersionStatus = map[string]string{
- "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
- "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
- "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The list is ordered by recency, with the newest update first. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
- "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
- "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
- "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
- "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
-}
-
-func (ClusterVersionStatus) SwaggerDoc() map[string]string {
- return map_ClusterVersionStatus
-}
-
-var map_ComponentOverride = map[string]string{
- "": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
- "kind": "kind identifies which object to override.",
- "group": "group identifies the API group that the kind is in.",
- "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
- "name": "name is the component's name.",
- "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false",
-}
-
-func (ComponentOverride) SwaggerDoc() map[string]string {
- return map_ComponentOverride
-}
-
-var map_Update = map[string]string{
- "": "Update represents a release of the ClusterVersionOperator, referenced by the Image member.",
- "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.",
- "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
- "force": "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.",
-}
-
-func (Update) SwaggerDoc() map[string]string {
- return map_Update
-}
-
-var map_UpdateHistory = map[string]string{
- "": "UpdateHistory is a single attempted update to the cluster.",
- "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).",
- "startedTime": "startedTime is the time at which the update was started.",
- "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
- "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
- "image": "image is a container image location that contains the update. This value is always populated.",
- "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.",
-}
-
-func (UpdateHistory) SwaggerDoc() map[string]string {
- return map_UpdateHistory
-}
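As a worked example of the state semantics documented above, a helper can scan the newest-first history for the most recent fully applied version. A sketch assuming the config/v1 UpdateHistory type and its CompletedUpdate state constant; the function name is mine:

package example

import v1 "github.com/openshift/api/config/v1"

// lastCompletedVersion walks the history (newest first, per the doc
// string above) and returns the latest version whose rollout completed.
func lastCompletedVersion(history []v1.UpdateHistory) (string, bool) {
	for _, entry := range history {
		if entry.State == v1.CompletedUpdate {
			return entry.Version, true
		}
	}
	return "", false
}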
-
-var map_Console = map[string]string{
- "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Console) SwaggerDoc() map[string]string {
- return map_Console
-}
-
-var map_ConsoleAuthentication = map[string]string{
- "": "ConsoleAuthentication defines a list of optional configuration for console authentication.",
- "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
-}
-
-func (ConsoleAuthentication) SwaggerDoc() map[string]string {
- return map_ConsoleAuthentication
-}
-
-var map_ConsoleSpec = map[string]string{
- "": "ConsoleSpec is the specification of the desired behavior of the Console.",
-}
-
-func (ConsoleSpec) SwaggerDoc() map[string]string {
- return map_ConsoleSpec
-}
-
-var map_ConsoleStatus = map[string]string{
- "": "ConsoleStatus defines the observed status of the Console.",
- "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.",
-}
-
-func (ConsoleStatus) SwaggerDoc() map[string]string {
- return map_ConsoleStatus
-}
-
-var map_DNS = map[string]string{
- "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (DNS) SwaggerDoc() map[string]string {
- return map_DNS
-}
-
-var map_DNSSpec = map[string]string{
- "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
- "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
- "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.",
-}
-
-func (DNSSpec) SwaggerDoc() map[string]string {
- return map_DNSSpec
-}
-
-var map_DNSZone = map[string]string{
- "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.",
- "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS, the zone can be fetched using `ID` as the id in [1]; on Azure, the zone can be fetched using `ID` as a pre-determined name in [2]; on GCP, the zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get",
- "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options",
-}
-
-func (DNSZone) SwaggerDoc() map[string]string {
- return map_DNSZone
-}
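The two lookup modes documented above are independent: a DNSZone may carry a provider ID, a set of tags, or both. A sketch with hypothetical values (both the zone ID and the tag key/value are made up):

package example

import v1 "github.com/openshift/api/config/v1"

// exampleZones builds one zone identified by provider ID and one
// identified by tag filters.
func exampleZones() (byID, byTags v1.DNSZone) {
	byID = v1.DNSZone{ID: "Z0EXAMPLE123"} // hypothetical AWS hosted zone ID
	byTags = v1.DNSZone{Tags: map[string]string{
		"kubernetes.io/cluster/example": "owned", // hypothetical tag
	}}
	return byID, byTags
}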
-
-var map_CustomFeatureGates = map[string]string{
- "enabled": "enabled is a list of all feature gates that you want to force on",
- "disabled": "disabled is a list of all feature gates that you want to force off",
-}
-
-func (CustomFeatureGates) SwaggerDoc() map[string]string {
- return map_CustomFeatureGates
-}
-
-var map_FeatureGate = map[string]string{
- "": "FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (FeatureGate) SwaggerDoc() map[string]string {
- return map_FeatureGate
-}
-
-var map_FeatureGateSelection = map[string]string{
- "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
- "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" to use this field.",
-}
-
-func (FeatureGateSelection) SwaggerDoc() map[string]string {
- return map_FeatureGateSelection
-}
-
-var map_Image = map[string]string{
- "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to blacklist/whitelist registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Image) SwaggerDoc() map[string]string {
- return map_Image
-}
-
-var map_ImageSpec = map[string]string{
- "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
- "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
- "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
- "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
-}
-
-func (ImageSpec) SwaggerDoc() map[string]string {
- return map_ImageSpec
-}
-
-var map_ImageStatus = map[string]string{
- "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
- "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
-}
-
-func (ImageStatus) SwaggerDoc() map[string]string {
- return map_ImageStatus
-}
-
-var map_RegistryLocation = map[string]string{
- "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
- "domainName": "domainName specifies a domain name for the registry. If the registry uses a non-standard port (i.e. not 80 or 443), the port should be included in the domain name as well.",
- "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.",
-}
-
-func (RegistryLocation) SwaggerDoc() map[string]string {
- return map_RegistryLocation
-}
-
-var map_RegistrySources = map[string]string{
- "": "RegistrySources holds cluster-wide information about how to handle the registries config.",
- "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificate or which only support HTTP connections.",
- "blockedRegistries": "blockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
- "allowedRegistries": "allowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
-}
-
-func (RegistrySources) SwaggerDoc() map[string]string {
- return map_RegistrySources
-}
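The doc strings above state that blockedRegistries and allowedRegistries are mutually exclusive; that constraint is easy to express as a validation helper. A sketch, with the function name being my own:

package example

import (
	"fmt"

	v1 "github.com/openshift/api/config/v1"
)

// validateRegistrySources rejects configs that set both lists,
// per the mutual-exclusion rule in the doc strings above.
func validateRegistrySources(rs v1.RegistrySources) error {
	if len(rs.BlockedRegistries) > 0 && len(rs.AllowedRegistries) > 0 {
		return fmt.Errorf("only one of blockedRegistries or allowedRegistries may be set")
	}
	return nil
}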
-
-var map_AWSPlatformStatus = map[string]string{
- "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
- "region": "region holds the default AWS region for new AWS resources created by the cluster.",
-}
-
-func (AWSPlatformStatus) SwaggerDoc() map[string]string {
- return map_AWSPlatformStatus
-}
-
-var map_AzurePlatformStatus = map[string]string{
- "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
- "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
- "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.",
-}
-
-func (AzurePlatformStatus) SwaggerDoc() map[string]string {
- return map_AzurePlatformStatus
-}
-
-var map_BareMetalPlatformStatus = map[string]string{
- "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (BareMetalPlatformStatus) SwaggerDoc() map[string]string {
- return map_BareMetalPlatformStatus
-}
-
-var map_GCPPlatformStatus = map[string]string{
- "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.",
- "projectID": "projectID is the Project ID for new GCP resources created for the cluster.",
- "region": "region holds the region for new GCP resources created for the cluster.",
-}
-
-func (GCPPlatformStatus) SwaggerDoc() map[string]string {
- return map_GCPPlatformStatus
-}
-
-var map_Infrastructure = map[string]string{
- "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Infrastructure) SwaggerDoc() map[string]string {
- return map_Infrastructure
-}
-
-var map_InfrastructureList = map[string]string{
- "": "InfrastructureList is a list of Infrastructure resources.",
-}
-
-func (InfrastructureList) SwaggerDoc() map[string]string {
- return map_InfrastructureList
-}
-
-var map_InfrastructureSpec = map[string]string{
- "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.",
- "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.",
-}
-
-func (InfrastructureSpec) SwaggerDoc() map[string]string {
- return map_InfrastructureSpec
-}
-
-var map_InfrastructureStatus = map[string]string{
- "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
- "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
- "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
- "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.",
- "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
- "apiServerURL": "apiServerURL is a valid URI with scheme(http/https), address and port. apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
- "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme(http/https), address and port. apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
-}
-
-func (InfrastructureStatus) SwaggerDoc() map[string]string {
- return map_InfrastructureStatus
-}
-
-var map_OpenStackPlatformStatus = map[string]string{
- "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
- return map_OpenStackPlatformStatus
-}
-
-var map_OvirtPlatformStatus = map[string]string{
- "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for oVirt deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
- return map_OvirtPlatformStatus
-}
-
-var map_PlatformStatus = map[string]string{
- "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, only one of the status structs is expected to be set.",
- "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
- "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
- "azure": "Azure contains settings specific to the Azure infrastructure provider.",
- "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
- "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
- "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
- "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
-}
-
-func (PlatformStatus) SwaggerDoc() map[string]string {
- return map_PlatformStatus
-}
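The guidance above, that components must treat unrecognized platform types as None, usually surfaces as a switch with a conservative default. A sketch assuming the config/v1 PlatformType constants; the helper and its region-extraction purpose are illustrative:

package example

import v1 "github.com/openshift/api/config/v1"

// regionFor returns a provider region where one is defined, and falls
// back to empty, i.e. the documented None behavior, for anything else.
func regionFor(ps *v1.PlatformStatus) string {
	if ps == nil {
		return ""
	}
	switch ps.Type {
	case v1.AWSPlatformType:
		if ps.AWS != nil {
			return ps.AWS.Region
		}
	case v1.GCPPlatformType:
		if ps.GCP != nil {
			return ps.GCP.Region
		}
	}
	// Unknown or region-less platforms: behave as None.
	return ""
}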
-
-var map_Ingress = map[string]string{
- "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Ingress) SwaggerDoc() map[string]string {
- return map_Ingress
-}
-
-var map_IngressSpec = map[string]string{
- "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
-}
-
-func (IngressSpec) SwaggerDoc() map[string]string {
- return map_IngressSpec
-}
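The host-generation pattern documented above is mechanical. A sketch; routeName and routeNamespace are hypothetical inputs:

package example

import (
	"fmt"

	v1 "github.com/openshift/api/config/v1"
)

// defaultRouteHost renders "<route-name>.<route-namespace>.<domain>"
// as described in the IngressSpec doc string above.
func defaultRouteHost(routeName, routeNamespace string, ing v1.Ingress) string {
	return fmt.Sprintf("%s.%s.%s", routeName, routeNamespace, ing.Spec.Domain)
}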
-
-var map_ClusterNetworkEntry = map[string]string{
- "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
- "cidr": "The complete block for pod IPs.",
- "hostPrefix": "The size (prefix) of block to allocate to each node.",
-}
-
-func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
- return map_ClusterNetworkEntry
-}
-
-var map_ExternalIPConfig = map[string]string{
- "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
- "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
- "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In OpenShift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
-}
-
-func (ExternalIPConfig) SwaggerDoc() map[string]string {
- return map_ExternalIPConfig
-}
-
-var map_ExternalIPPolicy = map[string]string{
- "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
- "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
- "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
-}
-
-func (ExternalIPPolicy) SwaggerDoc() map[string]string {
- return map_ExternalIPPolicy
-}
-
-var map_Network = map[string]string{
- "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.",
- "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Network) SwaggerDoc() map[string]string {
- return map_Network
-}
-
-var map_NetworkSpec = map[string]string{
- "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
- "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
- "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are:\n- OpenShiftSDN\nThis field is immutable after installation.",
- "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
-}
-
-func (NetworkSpec) SwaggerDoc() map[string]string {
- return map_NetworkSpec
-}
-
-var map_NetworkStatus = map[string]string{
- "": "NetworkStatus is the current network configuration.",
- "clusterNetwork": "IP address pool to use for pod IPs.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
- "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).",
- "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
-}
-
-func (NetworkStatus) SwaggerDoc() map[string]string {
- return map_NetworkStatus
-}
-
-var map_BasicAuthIdentityProvider = map[string]string{
- "": "BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
-}
-
-func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
- return map_BasicAuthIdentityProvider
-}
-
-var map_GitHubIdentityProvider = map[string]string{
- "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "organizations": "organizations optionally restricts which organizations are allowed to log in",
- "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
- "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
-}
-
-func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
- return map_GitHubIdentityProvider
-}
-
-var map_GitLabIdentityProvider = map[string]string{
- "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "url": "url is the oauth server base URL",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
-}
-
-func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
- return map_GitLabIdentityProvider
-}
-
-var map_GoogleIdentityProvider = map[string]string{
- "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
-}
-
-func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
- return map_GoogleIdentityProvider
-}
-
-var map_HTPasswdIdentityProvider = map[string]string{
- "": "HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials",
- "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
- return map_HTPasswdIdentityProvider
-}
-
-var map_IdentityProvider = map[string]string{
- "": "IdentityProvider provides identities for users authenticating using credentials",
- "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
- "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users. Defaults to \"claim\".",
-}
-
-func (IdentityProvider) SwaggerDoc() map[string]string {
- return map_IdentityProvider
-}
-
-var map_IdentityProviderConfig = map[string]string{
- "": "IdentityProviderConfig contains configuration for using a specific identity provider",
- "type": "type identifies the identity provider type for this entry.",
- "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
- "github": "github enables user authentication using GitHub credentials",
- "gitlab": "gitlab enables user authentication using GitLab credentials",
- "google": "google enables user authentication using Google credentials",
- "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
- "keystone": "keystone enables user authentication using keystone password credentials",
- "ldap": "ldap enables user authentication using LDAP credentials",
- "openID": "openID enables user authentication using OpenID credentials",
- "requestHeader": "requestHeader enables user authentication using request header credentials",
-}
-
-func (IdentityProviderConfig) SwaggerDoc() map[string]string {
- return map_IdentityProviderConfig
-}
-
-var map_KeystoneIdentityProvider = map[string]string{
- "": "KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials",
- "domainName": "domainName is required for keystone v3",
-}
-
-func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
- return map_KeystoneIdentityProvider
-}
-
-var map_LDAPAttributeMapping = map[string]string{
- "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
- "id": "id is the list of attributes whose values should be used as the user ID. Required. The first non-empty attribute is used. At least one attribute is required. If none of the listed attributes have a value, authentication fails. LDAP standard identity attribute is \"dn\"",
- "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
- "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity. LDAP standard display name attribute is \"cn\"",
- "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
-}
-
-func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
- return map_LDAPAttributeMapping
-}
-
-var map_LDAPIdentityProvider = map[string]string{
- "": "LDAPIdentityProvider provides identities for users authenticating using LDAP credentials",
- "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter",
- "bindDN": "bindDN is an optional DN to bind with during the search phase.",
- "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "insecure": "insecure, if true, indicates the connection should not use TLS.\nWARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`.\nWhen `true`, \"ldap://\" URLs connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "attributes": "attributes maps LDAP attributes to identities",
-}
-
-func (LDAPIdentityProvider) SwaggerDoc() map[string]string {
- return map_LDAPIdentityProvider
-}
-
-var map_OAuth = map[string]string{
- "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.",
-}
-
-func (OAuth) SwaggerDoc() map[string]string {
- return map_OAuth
-}
-
-var map_OAuthRemoteConnectionInfo = map[string]string{
- "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
- "url": "url is the remote URL to connect to",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
- return map_OAuthRemoteConnectionInfo
-}
-
-var map_OAuthSpec = map[string]string{
- "": "OAuthSpec contains desired cluster auth configuration",
- "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
- "tokenConfig": "tokenConfig contains options for authorization and access tokens",
- "templates": "templates allow you to customize pages like the login page.",
-}
-
-func (OAuthSpec) SwaggerDoc() map[string]string {
- return map_OAuthSpec
-}
-
-var map_OAuthStatus = map[string]string{
- "": "OAuthStatus shows current known state of OAuth server in the cluster",
-}
-
-func (OAuthStatus) SwaggerDoc() map[string]string {
- return map_OAuthStatus
-}
-
-var map_OAuthTemplates = map[string]string{
- "": "OAuthTemplates allow for customization of pages like the login page",
- "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
- "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
- "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthTemplates) SwaggerDoc() map[string]string {
- return map_OAuthTemplates
-}
-
-var map_OpenIDClaims = map[string]string{
- "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
- "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim",
- "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
- "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
-}
-
-func (OpenIDClaims) SwaggerDoc() map[string]string {
- return map_OpenIDClaims
-}
-
-var map_OpenIDIdentityProvider = map[string]string{
- "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.",
- "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.",
- "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.",
- "claims": "claims mappings",
-}
-
-func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
- return map_OpenIDIdentityProvider
-}
-
-var map_RequestHeaderIdentityProvider = map[string]string{
- "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials",
- "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.",
- "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.",
- "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.",
- "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
- "headers": "headers is the set of headers to check for identity information",
- "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username",
- "nameHeaders": "nameHeaders is the set of headers to check for the display name",
- "emailHeaders": "emailHeaders is the set of headers to check for the email address",
-}
-
-func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
- return map_RequestHeaderIdentityProvider
-}
-
-var map_TokenConfig = map[string]string{
- "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
- "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
- "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are integer values:\n x < 0 Tokens time out is enabled but tokens never timeout unless configured per client (e.g. `-1`)\n x = 0 Tokens time out is disabled (default)\n x > 0 Tokens time out if there is no activity for x seconds\nThe current minimum allowed value for X is 300 (5 minutes)",
-}
-
-func (TokenConfig) SwaggerDoc() map[string]string {
- return map_TokenConfig
-}
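The three-way convention quoted in accessTokenInactivityTimeoutSeconds above (negative, zero, positive with a 300-second floor) is easy to get wrong; here is a minimal validation sketch in Go, under the assumption that the rule is exactly as the doc string states (the function name is illustrative, not part of openshift/api):

package tokencfg

import "fmt"

// validateInactivityTimeout mirrors the rule documented above:
// x < 0 enables the timeout but defers to per-client settings,
// x = 0 disables it (the default), and x > 0 must be >= 300 seconds.
func validateInactivityTimeout(x int32) error {
	if x > 0 && x < 300 {
		return fmt.Errorf("accessTokenInactivityTimeoutSeconds: minimum positive value is 300, got %d", x)
	}
	return nil
}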
-
-var map_HubSource = map[string]string{
- "": "HubSource is used to specify the hub source and its configuration",
- "name": "name is the name of one of the default hub sources",
- "disabled": "disabled is used to disable a default hub source on cluster",
-}
-
-func (HubSource) SwaggerDoc() map[string]string {
- return map_HubSource
-}
-
-var map_HubSourceStatus = map[string]string{
- "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source",
- "status": "status indicates success or failure in applying the configuration",
- "message": "message provides more information regarding failures",
-}
-
-func (HubSourceStatus) SwaggerDoc() map[string]string {
- return map_HubSourceStatus
-}
-
-var map_OperatorHub = map[string]string{
- "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.",
-}
-
-func (OperatorHub) SwaggerDoc() map[string]string {
- return map_OperatorHub
-}
-
-var map_OperatorHubList = map[string]string{
- "": "OperatorHubList contains a list of OperatorHub",
-}
-
-func (OperatorHubList) SwaggerDoc() map[string]string {
- return map_OperatorHubList
-}
-
-var map_OperatorHubSpec = map[string]string{
- "": "OperatorHubSpec defines the desired state of OperatorHub",
- "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.",
- "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.",
-}
-
-func (OperatorHubSpec) SwaggerDoc() map[string]string {
- return map_OperatorHubSpec
-}
-
-var map_OperatorHubStatus = map[string]string{
- "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.",
- "sources": "sources encapsulates the result of applying the configuration for each hub source",
-}
-
-func (OperatorHubStatus) SwaggerDoc() map[string]string {
- return map_OperatorHubStatus
-}
-
-var map_Project = map[string]string{
- "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Project) SwaggerDoc() map[string]string {
- return map_Project
-}
-
-var map_ProjectSpec = map[string]string{
- "": "ProjectSpec holds the project creation configuration.",
- "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
- "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.",
-}
-
-func (ProjectSpec) SwaggerDoc() map[string]string {
- return map_ProjectSpec
-}
-
-var map_TemplateReference = map[string]string{
- "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced project request template",
-}
-
-func (TemplateReference) SwaggerDoc() map[string]string {
- return map_TemplateReference
-}
-
-var map_Proxy = map[string]string{
- "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`",
- "spec": "Spec holds user-settable values for the proxy configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Proxy) SwaggerDoc() map[string]string {
- return map_Proxy
-}
-
-var map_ProxySpec = map[string]string{
- "": "ProxySpec contains cluster proxy creation configuration.",
- "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
- "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.",
- "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.",
- "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.",
- "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |",
-}
-
-func (ProxySpec) SwaggerDoc() map[string]string {
- return map_ProxySpec
-}
-
-var map_ProxyStatus = map[string]string{
- "": "ProxyStatus shows current known state of the cluster proxy.",
- "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.",
- "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.",
- "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.",
-}
-
-func (ProxyStatus) SwaggerDoc() map[string]string {
- return map_ProxyStatus
-}
-
-var map_Scheduler = map[string]string{
- "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Scheduler) SwaggerDoc() map[string]string {
- return map_Scheduler
-}
-
-var map_SchedulerSpec = map[string]string{
- "policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
- "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
- "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
-}
-
-func (SchedulerSpec) SwaggerDoc() map[string]string {
- return map_SchedulerSpec
-}
-
-var map_CustomTLSProfile = map[string]string{
- "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.",
-}
-
-func (CustomTLSProfile) SwaggerDoc() map[string]string {
- return map_CustomTLSProfile
-}
-
-var map_IntermediateTLSProfile = map[string]string{
- "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29",
-}
-
-func (IntermediateTLSProfile) SwaggerDoc() map[string]string {
- return map_IntermediateTLSProfile
-}
-
-var map_ModernTLSProfile = map[string]string{
- "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility",
-}
-
-func (ModernTLSProfile) SwaggerDoc() map[string]string {
- return map_ModernTLSProfile
-}
-
-var map_OldTLSProfile = map[string]string{
- "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility",
-}
-
-func (OldTLSProfile) SwaggerDoc() map[string]string {
- return map_OldTLSProfile
-}
-
-var map_TLSProfileSpec = map[string]string{
- "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
- "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA",
- "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12",
-}
-
-func (TLSProfileSpec) SwaggerDoc() map[string]string {
- return map_TLSProfileSpec
-}
-
-var map_TLSSecurityProfile = map[string]string{
- "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.",
- "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.",
- "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0",
- "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2",
- "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.",
- "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1",
-}
-
-func (TLSSecurityProfile) SwaggerDoc() map[string]string {
- return map_TLSSecurityProfile
-}
-
-// AUTO-GENERATED FUNCTIONS END HERE
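Every hunk deleted above follows the same generated shape: a package-level map from JSON field name to doc string, plus a SwaggerDoc() method returning it. A minimal consumer sketch, assuming nothing beyond that shape (the swaggerDoced interface name is illustrative):

package swaggerdoc

// swaggerDoced is an illustrative name for the interface the generated
// types satisfy; the method signature is copied from the hunks above.
type swaggerDoced interface {
	SwaggerDoc() map[string]string
}

// fieldDoc returns the documentation for one JSON field, falling back
// to the type-level description stored under the empty key "".
func fieldDoc(t swaggerDoced, field string) string {
	docs := t.SwaggerDoc()
	if d, ok := docs[field]; ok {
		return d
	}
	return docs[""]
}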
diff --git a/vendor/github.com/seccomp/containers-golang/.gitignore b/vendor/github.com/seccomp/containers-golang/.gitignore
index 2cad96a16..e433eef88 100644
--- a/vendor/github.com/seccomp/containers-golang/.gitignore
+++ b/vendor/github.com/seccomp/containers-golang/.gitignore
@@ -1,2 +1,2 @@
-default.json
-fixtures
+*.orig
+generate
diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/seccomp/containers-golang/LICENSE
index 8dada3eda..bd465fcf0 100644
--- a/vendor/github.com/openshift/api/LICENSE
+++ b/vendor/github.com/seccomp/containers-golang/LICENSE
@@ -175,18 +175,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
+ Copyright 2018-2019 github.com/seccomp authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/seccomp/containers-golang/Makefile b/vendor/github.com/seccomp/containers-golang/Makefile
index 88569e87b..2d91917f9 100644
--- a/vendor/github.com/seccomp/containers-golang/Makefile
+++ b/vendor/github.com/seccomp/containers-golang/Makefile
@@ -1,3 +1,5 @@
+export GO111MODULE=off
+
TAGS ?= seccomp
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
GO := go
@@ -5,14 +7,26 @@ PACKAGE := github.com/seccomp/containers-golang
sources := $(wildcard *.go)
-default.json: $(sources)
+.PHONY: seccomp.json
+seccomp.json: $(sources)
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/generate.go
$(GO) build -compiler gc ./cmd/generate.go
$(GO) run ${BUILDFLAGS} cmd/generate.go
-all: default.json
+all: seccomp.json
.PHONY: test-unit
test-unit:
- $(GO) test $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
- $(GO) test $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ $(GO) test -v $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ $(GO) test -v $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+
+.PHONY: vendor
+vendor:
+ export GO111MODULE=on \
+ $(GO) mod tidy && \
+ $(GO) mod vendor && \
+ $(GO) mod verify
+
+.PHONY: clean
+clean:
+ rm -f generate
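The renamed seccomp.json target builds and runs cmd/generate.go (see the go:generate directive in the seccomp_linux.go hunk further below). A plausible minimal generator under that assumption — it relies only on DefaultProfile() from this package and standard-library JSON encoding; the real cmd/generate.go may differ:

// +build seccomp

// Sketch of a generator in the spirit of cmd/generate.go; build and
// run with "go run -tags seccomp". It serializes the in-code default
// profile to the seccomp.json file the Makefile target now names.
package main

import (
	"encoding/json"
	"io/ioutil"
	"log"

	seccomp "github.com/seccomp/containers-golang"
)

func main() {
	profile := seccomp.DefaultProfile() // defined in seccomp_default_linux.go
	data, err := json.MarshalIndent(profile, "", "\t")
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("seccomp.json", data, 0644); err != nil {
		log.Fatal(err)
	}
}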
diff --git a/vendor/github.com/seccomp/containers-golang/README.md b/vendor/github.com/seccomp/containers-golang/README.md
index 1012baec3..a44238432 100644
--- a/vendor/github.com/seccomp/containers-golang/README.md
+++ b/vendor/github.com/seccomp/containers-golang/README.md
@@ -1,9 +1,13 @@
+# containers-golang
+
+[![CircleCI](https://circleci.com/gh/seccomp/containers-golang.svg?style=shield)](https://circleci.com/gh/seccomp/containers-golang)
+
`containers-golang` is a set of Go libraries used by container runtimes to generate and load seccomp mappings into the kernel.
seccomp (short for secure computing mode) is a BPF based syscall filter language and presents a more conventional function-call based filtering interface that should be familiar to, and easily adopted by, application developers.
## Building
- make - Generates default.json file, which containes the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to controll the syscalls available to containers.
+ make - Generates seccomp.json file, which contains the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to control the syscalls available to containers.
### Supported build tags
@@ -13,13 +17,9 @@ seccomp (short for secure computing mode) is a BPF based syscall filter language
When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
-## License
-
-ASL 2.0
-
## Contact
-- IRC: #[CRI-O](irc://irc.freenode.net:6667/#cri-o) on freenode.net
+- IRC: #[containers](irc://irc.freenode.net:6667/#containers) on freenode.net
[cri-o]: https://github.com/kubernetes-incubator/cri-o/pulls
[buildah]: https://github.com/projectatomic/buildah
diff --git a/vendor/github.com/seccomp/containers-golang/go.mod b/vendor/github.com/seccomp/containers-golang/go.mod
new file mode 100644
index 000000000..2b56d46fd
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/go.mod
@@ -0,0 +1,16 @@
+module github.com/seccomp/containers-golang
+
+go 1.13
+
+require (
+ github.com/blang/semver v3.5.1+incompatible // indirect
+ github.com/hashicorp/go-multierror v1.0.0 // indirect
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+ github.com/opencontainers/runtime-tools v0.9.0
+ github.com/opencontainers/selinux v1.3.0 // indirect
+ github.com/seccomp/libseccomp-golang v0.9.1
+ github.com/sirupsen/logrus v1.4.2 // indirect
+ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
+ github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+ golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc
+)
diff --git a/vendor/github.com/seccomp/containers-golang/go.sum b/vendor/github.com/seccomp/containers-golang/go.sum
new file mode 100644
index 000000000..ba00acd09
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/go.sum
@@ -0,0 +1,48 @@
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777 h1:7CkKaORyxoXsM8z56r+M0wf3uCpVGVqx4CWq7oJ/4DY=
+github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
+github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
+github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
+github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc h1:EinpED/Eb9JUgDi6pkoFjw+tz69c3lHUZr2+Va84S0w=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json
index fe9eda56a..4c84d981f 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp.json
+++ b/vendor/github.com/seccomp/containers-golang/seccomp.json
@@ -52,6 +52,8 @@
"syscalls": [
{
"names": [
+ "_llseek",
+ "_newselect",
"accept",
"accept4",
"access",
@@ -118,6 +120,8 @@
"ftruncate64",
"futex",
"futimesat",
+ "get_robust_list",
+ "get_thread_area",
"getcpu",
"getcwd",
"getdents",
@@ -143,12 +147,10 @@
"getresuid",
"getresuid32",
"getrlimit",
- "get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
- "get_thread_area",
"gettid",
"gettimeofday",
"getuid",
@@ -159,13 +161,13 @@
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
- "ioctl",
"io_destroy",
"io_getevents",
- "ioprio_get",
- "ioprio_set",
"io_setup",
"io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
"ipc",
"kill",
"lchown",
@@ -176,7 +178,6 @@
"listen",
"listxattr",
"llistxattr",
- "_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
@@ -194,6 +195,7 @@
"mlockall",
"mmap",
"mmap2",
+ "mount",
"mprotect",
"mq_getsetattr",
"mq_notify",
@@ -210,9 +212,9 @@
"munlock",
"munlockall",
"munmap",
+ "name_to_handle_at",
"nanosleep",
"newfstatat",
- "_newselect",
"open",
"openat",
"pause",
@@ -234,6 +236,7 @@
"readlink",
"readlinkat",
"readv",
+ "reboot",
"recv",
"recvfrom",
"recvmmsg",
@@ -253,11 +256,11 @@
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
- "sched_get_priority_max",
- "sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
@@ -277,6 +280,9 @@
"sendmmsg",
"sendmsg",
"sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
"setfsgid",
"setfsgid32",
"setfsuid",
@@ -297,11 +303,8 @@
"setreuid",
"setreuid32",
"setrlimit",
- "set_robust_list",
"setsid",
"setsockopt",
- "set_thread_area",
- "set_tid_address",
"setuid",
"setuid32",
"setxattr",
@@ -335,21 +338,24 @@
"time",
"timer_create",
"timer_delete",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
+ "umount",
+ "umount2",
"uname",
"unlink",
"unlinkat",
+ "unshare",
"utime",
"utimensat",
"utimes",
@@ -359,12 +365,7 @@
"waitid",
"waitpid",
"write",
- "writev",
- "mount",
- "umount2",
- "reboot",
- "name_to_handle_at",
- "unshare"
+ "writev"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
@@ -770,4 +771,4 @@
"excludes": {}
}
]
-}
+}
\ No newline at end of file
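The allowlist above is consumed at runtime through LoadProfile (its signature is unchanged in the seccomp_linux.go hunk below). A minimal sketch, assuming the caller already has a runtime spec in hand; the file path is illustrative:

// +build seccomp

package main

import (
	"io/ioutil"
	"log"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	seccomp "github.com/seccomp/containers-golang"
)

func main() {
	body, err := ioutil.ReadFile("seccomp.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	var spec specs.Spec // normally the container's generated runtime spec
	profile, err := seccomp.LoadProfile(string(body), &spec)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d syscall rules", len(profile.Syscalls))
}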
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
index fde3cff75..e137a5887 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
@@ -1,5 +1,9 @@
// +build seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -44,6 +48,8 @@ func DefaultProfile() *Seccomp {
syscalls := []*Syscall{
{
Names: []string{
+ "_llseek",
+ "_newselect",
"accept",
"accept4",
"access",
@@ -110,6 +116,8 @@ func DefaultProfile() *Seccomp {
"ftruncate64",
"futex",
"futimesat",
+ "get_robust_list",
+ "get_thread_area",
"getcpu",
"getcwd",
"getdents",
@@ -135,12 +143,10 @@ func DefaultProfile() *Seccomp {
"getresuid",
"getresuid32",
"getrlimit",
- "get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
- "get_thread_area",
"gettid",
"gettimeofday",
"getuid",
@@ -151,13 +157,13 @@ func DefaultProfile() *Seccomp {
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
- "ioctl",
"io_destroy",
"io_getevents",
- "ioprio_get",
- "ioprio_set",
"io_setup",
"io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
"ipc",
"kill",
"lchown",
@@ -168,7 +174,6 @@ func DefaultProfile() *Seccomp {
"listen",
"listxattr",
"llistxattr",
- "_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
@@ -206,7 +211,6 @@ func DefaultProfile() *Seccomp {
"name_to_handle_at",
"nanosleep",
"newfstatat",
- "_newselect",
"open",
"openat",
"pause",
@@ -248,11 +252,11 @@ func DefaultProfile() *Seccomp {
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
- "sched_get_priority_max",
- "sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
@@ -272,6 +276,9 @@ func DefaultProfile() *Seccomp {
"sendmmsg",
"sendmsg",
"sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
"setfsgid",
"setfsgid32",
"setfsuid",
@@ -292,11 +299,8 @@ func DefaultProfile() *Seccomp {
"setreuid",
"setreuid32",
"setrlimit",
- "set_robust_list",
"setsid",
"setsockopt",
- "set_thread_area",
- "set_tid_address",
"setuid",
"setuid32",
"setxattr",
@@ -330,12 +334,12 @@ func DefaultProfile() *Seccomp {
"time",
"timer_create",
"timer_delete",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
"times",
"tkill",
"truncate",
@@ -343,9 +347,11 @@ func DefaultProfile() *Seccomp {
"ugetrlimit",
"umask",
"umount",
+ "umount2",
"uname",
"unlink",
"unlinkat",
+ "unshare",
"utime",
"utimensat",
"utimes",
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
index 9a495e3e2..44dcd90b8 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
@@ -1,5 +1,9 @@
// +build seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -9,6 +13,7 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
libseccomp "github.com/seccomp/libseccomp-golang"
+ "golang.org/x/sys/unix"
)
//go:generate go run -tags 'seccomp' generate.go
@@ -22,11 +27,25 @@ func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
var config Seccomp
if err := json.Unmarshal([]byte(body), &config); err != nil {
- return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err)
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
}
return setupSeccomp(&config, rs)
}
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ config := &Seccomp{}
+ if err := json.Unmarshal(body, config); err != nil {
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
+ }
+ return setupSeccomp(config, rs)
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return setupSeccomp(config, specgen)
+}
+
var nativeToSeccomp = map[string]Arch{
"amd64": ArchX86_64,
"arm64": ArchAARCH64,
@@ -127,21 +146,22 @@ Loop:
}
if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args))
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet))
}
- for _, n := range call.Names {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args))
+ if len(call.Names) > 0 {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet))
}
}
return newConfig, nil
}
-func createSpecsSyscall(name string, action Action, args []*Arg) specs.LinuxSyscall {
+func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
newCall := specs.LinuxSyscall{
- Names: []string{name},
- Action: specs.LinuxSeccompAction(action),
+ Names: names,
+ Action: specs.LinuxSeccompAction(action),
+ ErrnoRet: errnoRet,
}
// Loop through all the arguments of the syscall and convert them
@@ -157,3 +177,15 @@ func createSpecsSyscall(name string, action Action, args []*Arg) specs.LinuxSysc
}
return newCall
}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ return true
+ }
+ }
+ return false
+}
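The new LoadProfileFromBytes/LoadProfileFromConfig entry points and the prctl-based IsEnabled check compose naturally in a caller; a minimal sketch using only the signatures added or shown in this hunk:

// +build seccomp

package main

import (
	"log"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	seccomp "github.com/seccomp/containers-golang"
)

// seccompFor picks a profile for a spec: a caller-supplied JSON body
// if present, the built-in default otherwise, and nothing at all when
// the kernel lacks CONFIG_SECCOMP/CONFIG_SECCOMP_FILTER.
func seccompFor(spec *specs.Spec, custom []byte) (*specs.LinuxSeccomp, error) {
	if !seccomp.IsEnabled() {
		return nil, nil // run unconfined
	}
	if len(custom) > 0 {
		return seccomp.LoadProfileFromBytes(custom, spec)
	}
	return seccomp.GetDefaultProfile(spec)
}

func main() {
	var spec specs.Spec // placeholder; normally the container's spec
	if _, err := seccompFor(&spec, nil); err != nil {
		log.Fatal(err)
	}
}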
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
index 279340426..936a9a641 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
@@ -1,5 +1,9 @@
// +build !seccomp
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
package seccomp // import "github.com/seccomp/containers-golang"
import (
@@ -22,3 +26,18 @@ func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
return nil, fmt.Errorf("Seccomp not supported on this platform")
}
+
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, fmt.Errorf("Seccomp not supported on this platform")
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, fmt.Errorf("Seccomp not supported on this platform")
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ return false
+}
diff --git a/vendor/github.com/seccomp/containers-golang/types.go b/vendor/github.com/seccomp/containers-golang/types.go
index b549a55fe..6651c423f 100644
--- a/vendor/github.com/seccomp/containers-golang/types.go
+++ b/vendor/github.com/seccomp/containers-golang/types.go
@@ -1,5 +1,9 @@
package seccomp // import "github.com/seccomp/containers-golang"
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
@@ -90,4 +94,5 @@ type Syscall struct {
Comment string `json:"comment"`
Includes Filter `json:"includes"`
Excludes Filter `json:"excludes"`
+ ErrnoRet *uint `json:"errnoRet,omitempty"`
}
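The new ErrnoRet field lets a single rule fail with a chosen errno instead of the profile's default action. A minimal hand-built rule follows; seccomp.ActErrno is assumed to be the package's usual SCMP_ACT_ERRNO constant and is not shown in this hunk:

package main

import (
	"fmt"

	seccomp "github.com/seccomp/containers-golang"
)

func main() {
	eperm := uint(1) // EPERM, returned to the caller instead of killing the task
	rule := seccomp.Syscall{
		Names:    []string{"keyctl"},
		Action:   seccomp.ActErrno, // assumed Act* constant, per package convention
		Args:     []*seccomp.Arg{},
		ErrnoRet: &eperm,
	}
	fmt.Printf("%+v\n", rule)
}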
diff --git a/vendor/github.com/seccomp/containers-golang/vendor.conf b/vendor/github.com/seccomp/containers-golang/vendor.conf
deleted file mode 100644
index 6111c475b..000000000
--- a/vendor/github.com/seccomp/containers-golang/vendor.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-github.com/opencontainers/runtime-tools master
-github.com/blang/semver master
-github.com/hashicorp/go-multierror master
-github.com/hashicorp/errwrap master
-github.com/syndtr/gocapability master
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-