author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2019-09-05 19:44:43 +0200
committer GitHub <noreply@github.com>  2019-09-05 19:44:43 +0200
commit    cef5bec06b200ffa5caed46db296e55fb7d72376 (patch)
tree      2dd8944e49a07b8f067f07ac8c880244f97dbdb8 /vendor/github.com
parent    3f81f4409d41510dd7440af9a3321c949ca485a0 (diff)
parent    f66a2069f1b26ff9987b008ba4b0c91ac3b682cc (diff)
Merge pull request #3948 from openSUSE/buildah-update
Update buildah to v1.11.0
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 35
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 120
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 9
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 76
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cgroups/cgroups_supported.go | 31
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cgroups/cgroups_unsupported.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 7
-rw-r--r--  vendor/github.com/gogo/protobuf/proto/extensions.go | 1
-rw-r--r--  vendor/github.com/gogo/protobuf/proto/extensions_gogo.go | 21
-rw-r--r--  vendor/github.com/json-iterator/go/adapter.go | 2
-rw-r--r--  vendor/github.com/json-iterator/go/go.mod | 11
-rw-r--r--  vendor/github.com/json-iterator/go/go.sum | 14
-rw-r--r--  vendor/github.com/json-iterator/go/iter_skip.go | 25
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_native.go | 14
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_struct_decoder.go | 2
-rw-r--r--  vendor/github.com/json-iterator/go/stream_float.go | 17
-rw-r--r--  vendor/github.com/openshift/api/LICENSE | 201
-rw-r--r--  vendor/github.com/openshift/api/config/v1/doc.go | 8
-rw-r--r--  vendor/github.com/openshift/api/config/v1/register.go | 70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/stringsource.go | 31
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types.go | 310
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_apiserver.go | 75
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_authentication.go | 120
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_build.go | 101
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_operator.go | 150
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_version.go | 237
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_console.go | 63
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_dns.go | 88
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_feature.go | 118
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_image.go | 111
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_infrastructure.go | 210
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_ingress.go | 42
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_network.go | 117
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_oauth.go | 557
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_operatorhub.go | 69
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_project.go | 55
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_proxy.go | 89
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_scheduling.go | 75
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go | 3200
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go | 1312
43 files changed, 7771 insertions(+), 39 deletions(-)
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 1a290f262..29e7bf44f 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -26,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.11.0-dev"
+ Version = "1.11.0"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 48c67d842..94f61763e 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,38 @@
+- Changelog for v1.11.0 (2019-08-29)
+ * tests/bud.bats: add --signature-policy to some tests
+ * Vendor github.com/openshift/api
+ * pull/commit/push: pay attention to $BUILD_REGISTRY_SOURCES
+ * Add `--log-level` command line option and deprecate `--debug`
+ * add support for cgroupsV2
+ * Correctly detect ExitError values from Run()
+ * Disable empty logrus timestamps to reduce logger noise
+ * Remove outdated deps Makefile target
+ * Remove gofmt.sh in favor of golangci-lint
+ * Remove govet.sh in favor of golangci-lint
+ * Allow to override build date with SOURCE_DATE_EPOCH
+ * Update shebangs to take env into consideration
+ * Fix directory pull image names
+ * Add --digestfile and Re-add push statement as debug
+ * README: mention that Podman uses Buildah's API
+ * Use content digests in ADD/COPY history entries
+ * add: add a DryRun flag to AddAndCopyOptions
+ * Fix possible runtime panic on bud
+ * Add security-related volume options to validator
+ * use correct path for ginkgo
+ * Add bud 'without arguments' integration tests
+ * Update documentation about bud
+ * add: handle hard links when copying with .dockerignore
+ * add: teach copyFileWithTar() about symlinks and directories
+ * Allow buildah bud to be called without arguments
+ * imagebuilder: fix detection of referenced stage roots
+ * Touch up go mod instructions in install
+ * run_linux: fix mounting /sys in a userns
+ * Vendor Storage v1.13.2
+ * Cirrus: Update VM images
+ * Fix handling of /dev/null masked devices
+ * Update `bud`/`from` help to contain indicator for `--dns=none`
+ * Bump back to v1.11.0-dev
+
- Changelog for v1.10.1 (2019-08-08)
* Bump containers/image to v3.0.2 to fix keyring issue
* Bug fix for volume minus syntax
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 989afad8e..b3b56f39a 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -2,15 +2,18 @@ package buildah
import (
"context"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
+ "os"
"strings"
"time"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/docker"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/signature"
@@ -21,6 +24,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
+ configv1 "github.com/openshift/api/config/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -107,21 +111,64 @@ type PushOptions struct {
}
var (
- // commitPolicy bypasses any signing requirements when committing containers to images
- commitPolicy = &signature.Policy{
- Default: []signature.PolicyRequirement{signature.NewPRReject()},
- Transports: map[string]signature.PolicyTransportScopes{
- is.Transport.Name(): {
- "": []signature.PolicyRequirement{
- signature.NewPRInsecureAcceptAnything(),
- },
- },
+ // storageAllowedPolicyScopes overrides the policy for local storage
+ // to ensure that we can read images from it.
+ storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+ "": []signature.PolicyRequirement{
+ signature.NewPRInsecureAcceptAnything(),
},
}
- // pushPolicy bypasses any signing requirements when pushing (copying) images from local storage
- pushPolicy = commitPolicy
)
+// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
+// variable, if it's set. The contents are expected to be a JSON-encoded
+// github.com/openshift/api/config/v1.RegistrySources, set by an OpenShift
+// build controller that arranged for us to be run in a container.
+func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error {
+ transport := dest.Transport()
+ if transport == nil {
+ return nil
+ }
+ if transport.Name() != docker.Transport.Name() {
+ return nil
+ }
+ dref := dest.DockerReference()
+ if dref == nil || reference.Domain(dref) == "" {
+ return nil
+ }
+
+ if registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES"); ok && len(registrySources) > 0 {
+ var sources configv1.RegistrySources
+ if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
+ return errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+ }
+ blocked := false
+ if len(sources.BlockedRegistries) > 0 {
+ for _, blockedDomain := range sources.BlockedRegistries {
+ if blockedDomain == reference.Domain(dref) {
+ blocked = true
+ }
+ }
+ }
+ if blocked {
+ return errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
+ }
+ allowed := true
+ if len(sources.AllowedRegistries) > 0 {
+ allowed = false
+ for _, allowedDomain := range sources.AllowedRegistries {
+ if allowedDomain == reference.Domain(dref) {
+ allowed = true
+ }
+ }
+ }
+ if !allowed {
+ return errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
+ }
+ }
+ return nil
+}
+
// Commit writes the contents of the container, along with its updated
// configuration, to a new image in the specified location, and if we know how,
// add any additional tags that were specified. Returns the ID of the new image
@@ -157,6 +204,14 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
}
+ // Load the system signing policy.
+ commitPolicy, err := signature.DefaultPolicy(systemContext)
+ if err != nil {
+ return "", nil, "", errors.Wrapf(err, "error obtaining default signature policy")
+ }
+ // Override the settings for local storage to make sure that we can always read the source "image".
+ commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
+
policyContext, err := signature.NewPolicyContext(commitPolicy)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context")
@@ -166,6 +221,28 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
logrus.Debugf("error destroying signature policy context: %v", err2)
}
}()
+
+ // Check if the commit is blocked by $BUILD_REGISTRY_SOURCES.
+ if err := checkRegistrySourcesAllows("commit to", dest); err != nil {
+ return imgID, nil, "", err
+ }
+ if len(options.AdditionalTags) > 0 {
+ names, err := util.ExpandNames(options.AdditionalTags, "", systemContext, b.store)
+ if err != nil {
+ return imgID, nil, "", err
+ }
+ for _, name := range names {
+ additionalDest, err := docker.Transport.ParseReference(name)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error parsing image name %q as an image reference", name)
+ }
+ if err := checkRegistrySourcesAllows("commit to", additionalDest); err != nil {
+ return imgID, nil, "", err
+ }
+ }
+ }
+ logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
+
// Check if the base image is already in the destination and it's some kind of local
// storage. If so, we can skip recompressing any layers that come from the base image.
exportBaseLayers := true
@@ -292,10 +369,24 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
return nil, "", errors.Errorf("push access to registry for %q is blocked by configuration", transports.ImageName(dest))
}
+ // Load the system signing policy.
+ pushPolicy, err := signature.DefaultPolicy(systemContext)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
+ }
+ // Override the settings for local storage to make sure that we can always read the source "image".
+ pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
+
policyContext, err := signature.NewPolicyContext(pushPolicy)
if err != nil {
return nil, "", errors.Wrapf(err, "error creating new signature policy context")
}
+ defer func() {
+ if err2 := policyContext.Destroy(); err2 != nil {
+ logrus.Debugf("error destroying signature policy context: %v", err2)
+ }
+ }()
+
// Look up the image.
src, _, err := util.FindImage(options.Store, "", systemContext, image)
if err != nil {
@@ -313,6 +404,13 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
}
maybeCachedSrc = cache
}
+
+ // Check if the push is blocked by $BUILD_REGISTRY_SOURCES.
+ if err := checkRegistrySourcesAllows("push to", dest); err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("pushing image to reference %q is allowed by policy", transports.ImageName(dest))
+
// Copy everything.
switch options.Compression {
case archive.Uncompressed:
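The checkRegistrySourcesAllows helper above drives all three checks (pull, commit, push) from a single environment variable. A minimal standalone sketch of that contract, assuming $BUILD_REGISTRY_SOURCES carries JSON with the allowedRegistries/blockedRegistries fields of configv1.RegistrySources; the struct here is a local stand-in for the vendored type and the hostnames are placeholders:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// registrySources mirrors the subset of configv1.RegistrySources that
// checkRegistrySourcesAllows consults; defined locally for illustration.
type registrySources struct {
	AllowedRegistries []string `json:"allowedRegistries,omitempty"`
	BlockedRegistries []string `json:"blockedRegistries,omitempty"`
}

func main() {
	// An OpenShift build controller would set something like:
	_ = os.Setenv("BUILD_REGISTRY_SOURCES",
		`{"allowedRegistries":["registry.example.com"],"blockedRegistries":["docker.io"]}`)

	v, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
	if !ok || len(v) == 0 {
		fmt.Println("no policy set; everything is allowed")
		return
	}
	var sources registrySources
	if err := json.Unmarshal([]byte(v), &sources); err != nil {
		fmt.Println("error parsing $BUILD_REGISTRY_SOURCES as JSON:", err)
		return
	}
	// The blocked-registries check runs before the allowed-registries
	// check, mirroring the order in checkRegistrySourcesAllows.
	fmt.Println("blocked:", sources.BlockedRegistries)
	fmt.Println("allowed:", sources.AllowedRegistries)
}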
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 61c80d90b..33b9bfb0d 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -18,7 +18,6 @@ require (
github.com/etcd-io/bbolt v1.3.2
github.com/fsouza/go-dockerclient v1.3.0
github.com/ghodss/yaml v1.0.0
- github.com/gogo/protobuf v1.2.0 // indirect
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
@@ -27,14 +26,15 @@ require (
github.com/mattn/go-shellwords v1.0.5
github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9 // indirect
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect
- github.com/onsi/ginkgo v1.6.0
- github.com/onsi/gomega v1.4.3
+ github.com/onsi/ginkgo v1.8.0
+ github.com/onsi/gomega v1.5.0
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc8
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.2.2
+ github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
github.com/openshift/imagebuilder v1.1.0
github.com/pkg/errors v0.8.1
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4
@@ -49,8 +49,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.1.0 // indirect
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
- golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
- gopkg.in/yaml.v2 v2.2.2 // indirect
+ k8s.io/api v0.0.0-20190813020757-36bff7324fb7 // indirect
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index afd38be1a..d489375de 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -14,8 +14,11 @@ github.com/Microsoft/hcsshim v0.8.3 h1:KWCdVGOju81E0RL4ndn9/E6I4qMBi6kuPw1W4yBYl
github.com/Microsoft/hcsshim v0.8.3/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
@@ -53,6 +56,7 @@ github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd
github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo=
github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo=
@@ -73,18 +77,38 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehP
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/go-dockerclient v1.3.0 h1:tOXkq/5++XihrAvH5YNwCTdPeQg3XVcC6WI2FVy4ZS0=
github.com/fsouza/go-dockerclient v1.3.0/go.mod h1:IN9UPc4/w7cXiARH2Yg99XxUHbAM+6rAi9hzBVbkWRU=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
@@ -93,6 +117,7 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@@ -101,6 +126,10 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VReWFjuxqrMIq12WKlSULI6Gs3PxQghLA=
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
@@ -113,6 +142,10 @@ github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
@@ -122,13 +155,26 @@ github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4=
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -142,6 +188,9 @@ github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs=
+github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s=
+github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY=
github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4=
github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 h1:5DKEDlc/DLftia3h4tk5K0KBiqBXogCc6EarWTlD3fM=
@@ -151,6 +200,7 @@ github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o=
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
@@ -165,10 +215,12 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
@@ -202,30 +254,43 @@ golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGq
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0 h1:1DW40AJQ7AP4nY6ORUGUdkpXyEC9W2GAXcOPaMZK0K8=
golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
+gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -233,5 +298,16 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U=
+k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58=
+k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU=
+k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8=
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 h1:+Qf/nITucAbm09aIdxvoA+7X0BwaXmQGVoR8k7Ynk9o=
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
+k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_supported.go
new file mode 100644
index 000000000..142eced08
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_supported.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package cgroups
+
+import (
+ "sync"
+ "syscall"
+)
+
+const (
+ _cgroup2SuperMagic = 0x63677270
+)
+
+var (
+ isUnifiedOnce sync.Once
+ isUnified bool
+ isUnifiedErr error
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 (unified) mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+ isUnifiedOnce.Do(func() {
+ var st syscall.Statfs_t
+ if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+ isUnified, isUnifiedErr = false, err
+ } else {
+ isUnified, isUnifiedErr = st.Type == _cgroup2SuperMagic, nil
+ }
+ })
+ return isUnified, isUnifiedErr
+}
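For reference, 0x63677270 is the kernel's CGROUP2_SUPER_MAGIC (ASCII "cgrp"), so the helper reports cgroup v2 exactly when /sys/fs/cgroup itself is a cgroup2 mount, and the sync.Once caches the answer for the life of the process. A minimal caller sketch using the vendored package above:

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/cgroups"
)

func main() {
	// The statfs result is computed once and cached; on non-Linux builds
	// the stub below always reports false, nil.
	unified, err := cgroups.IsCgroup2UnifiedMode()
	if err != nil {
		fmt.Println("cannot statfs /sys/fs/cgroup:", err)
		return
	}
	if unified {
		fmt.Println("cgroup v2 (unified hierarchy)")
	} else {
		fmt.Println("cgroup v1 or hybrid hierarchy")
	}
}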
diff --git a/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_unsupported.go
new file mode 100644
index 000000000..9dc196e42
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/cgroups/cgroups_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package cgroups
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 (unified) mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+ return false, nil
+}
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 98e3ff354..f05d2bf50 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -218,6 +218,9 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
if blocked {
return nil, errors.Errorf("pull access to registry for %q is blocked by configuration", transports.ImageName(srcRef))
}
+ if err := checkRegistrySourcesAllows("pull from", srcRef); err != nil {
+ return nil, err
+ }
destName, err := localImageNameForReference(ctx, store, srcRef)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index e5541dd34..aeb9f5bbb 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -1384,8 +1384,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
// Set ourselves up to read the container's exit status. We're doing this in a child process
- // so that we won't mess with the setting in a caller of the library. This stubs to OS specific
- // calls
+ // so that we won't mess with the setting in a caller of the library.
if err := setChildProcess(); err != nil {
os.Exit(1)
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index bea54b7ad..4be0b2de8 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -9,6 +9,7 @@ import (
"strings"
"syscall"
+ "github.com/containers/buildah/pkg/cgroups"
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/signature"
@@ -249,6 +250,12 @@ func Runtime() string {
if runtime != "" {
return runtime
}
+
+ // Need to switch default until runc supports cgroups v2
+ if unified, _ := cgroups.IsCgroup2UnifiedMode(); unified {
+ return "crun"
+ }
+
return DefaultRuntime
}
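The hunk above makes crun the effective default on cgroup v2 hosts, since runc could not drive cgroup v2 when this was written. A standalone sketch of the resulting selection order; the BUILDAH_RUNTIME override and the "runc" constant are assumptions for illustration, not shown in this hunk:

package main

import (
	"fmt"
	"os"

	"github.com/containers/buildah/pkg/cgroups"
)

const defaultRuntime = "runc" // stand-in for buildah's DefaultRuntime

func pickRuntime() string {
	// Hypothetical explicit override always wins.
	if rt := os.Getenv("BUILDAH_RUNTIME"); rt != "" {
		return rt
	}
	// Prefer crun on cgroup v2, as in the hunk above.
	if unified, _ := cgroups.IsCgroup2UnifiedMode(); unified {
		return "crun"
	}
	return defaultRuntime
}

func main() {
	fmt.Println("selected OCI runtime:", pickRuntime())
}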
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
index 686bd2a09..341c6f57f 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if epb, ok := pb.(extensionsBytes); ok {
+ ClearExtension(pb, extension)
newb, err := encodeExtension(extension, value)
if err != nil {
return err
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
index 53ebd8cca..6f1ae120e 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error)
return EncodeExtensionMap(m.extensionsWrite(), data)
}
+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+ return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
o := 0
for _, e := range m {
@@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
return o, nil
}
+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+ o := 0
+ end := len(data)
+ for _, e := range m {
+ if err := e.Encode(); err != nil {
+ return 0, err
+ }
+ n := copy(data[end-len(e.enc):], e.enc)
+ if n != len(e.enc) {
+ return 0, io.ErrShortBuffer
+ }
+ end -= n
+ o += n
+ }
+ return o, nil
+}
+
func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
e := m[id]
if err := e.Encode(); err != nil {
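EncodeExtensionMapBackwards fills the destination from the tail toward the head, so the encoded extensions end up packed at the end of a pre-sized buffer while n still reports the total byte count. A minimal sketch of that copy pattern with plain byte slices (fillBackwards is an illustrative stand-in, not a library function):

package main

import "fmt"

// fillBackwards copies each chunk so it ends where the previous chunk
// began, the same tail-first layout EncodeExtensionMapBackwards uses.
func fillBackwards(chunks [][]byte, buf []byte) (int, error) {
	end := len(buf)
	total := 0
	for _, c := range chunks {
		if len(c) > end {
			return 0, fmt.Errorf("short buffer") // io.ErrShortBuffer in the real code
		}
		copy(buf[end-len(c):end], c)
		end -= len(c)
		total += len(c)
	}
	return total, nil
}

func main() {
	buf := make([]byte, 6)
	n, err := fillBackwards([][]byte{[]byte("ab"), []byte("cdef")}, buf)
	fmt.Println(n, err, string(buf)) // 6 <nil> cdefab
}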
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
index e674d0f39..92d2cc4a3 100644
--- a/vendor/github.com/json-iterator/go/adapter.go
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -16,7 +16,7 @@ func Unmarshal(data []byte, v interface{}) error {
return ConfigDefault.Unmarshal(data, v)
}
-// UnmarshalFromString convenient method to read from string instead of []byte
+// UnmarshalFromString is a convenient method to read from string instead of []byte
func UnmarshalFromString(str string, v interface{}) error {
return ConfigDefault.UnmarshalFromString(str, v)
}
diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod
new file mode 100644
index 000000000..e05c42ff5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.mod
@@ -0,0 +1,11 @@
+module github.com/json-iterator/go
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/google/gofuzz v1.0.0
+ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
+ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
new file mode 100644
index 000000000..d778b5a14
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -0,0 +1,14 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
index f58beb913..e91eefb15 100644
--- a/vendor/github.com/json-iterator/go/iter_skip.go
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -37,17 +37,24 @@ func (iter *Iterator) SkipAndReturnBytes() []byte {
return iter.stopCapture()
}
-type captureBuffer struct {
- startedAt int
- captured []byte
+// SkipAndAppendBytes skips next JSON element and appends its content to
+// buffer, returning the result.
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
+ iter.startCaptureTo(buf, iter.head)
+ iter.Skip()
+ return iter.stopCapture()
}
-func (iter *Iterator) startCapture(captureStartedAt int) {
+func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
if iter.captured != nil {
panic("already in capture mode")
}
iter.captureStartedAt = captureStartedAt
- iter.captured = make([]byte, 0, 32)
+ iter.captured = buf
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+ iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
}
func (iter *Iterator) stopCapture() []byte {
@@ -58,13 +65,7 @@ func (iter *Iterator) stopCapture() []byte {
remaining := iter.buf[iter.captureStartedAt:iter.head]
iter.captureStartedAt = -1
iter.captured = nil
- if len(captured) == 0 {
- copied := make([]byte, len(remaining))
- copy(copied, remaining)
- return copied
- }
- captured = append(captured, remaining...)
- return captured
+ return append(captured, remaining...)
}
// Skip skips a json object and positions to relatively the next json object
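The new SkipAndAppendBytes is the allocation-friendly cousin of SkipAndReturnBytes: the caller supplies the buffer, so a loop can skip many elements while reusing one backing array. A minimal usage sketch against the public API:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[{"a":1},{"b":2}]`)
	buf := make([]byte, 0, 64)
	for iter.ReadArray() {
		buf = buf[:0] // reuse the same backing array on every element
		buf = iter.SkipAndAppendBytes(buf)
		fmt.Println(string(buf)) // {"a":1}, then {"b":2}
	}
}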
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
index 9042eb0cb..f88722d14 100644
--- a/vendor/github.com/json-iterator/go/reflect_native.go
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -432,17 +432,19 @@ func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
}
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- src := *((*[]byte)(ptr))
- if len(src) == 0 {
+ if codec.sliceType.UnsafeIsNil(ptr) {
stream.WriteNil()
return
}
+ src := *((*[]byte)(ptr))
encoding := base64.StdEncoding
stream.writeByte('"')
- size := encoding.EncodedLen(len(src))
- buf := make([]byte, size)
- encoding.Encode(buf, src)
- stream.buf = append(stream.buf, buf...)
+ if len(src) != 0 {
+ size := encoding.EncodedLen(len(src))
+ buf := make([]byte, size)
+ encoding.Encode(buf, src)
+ stream.buf = append(stream.buf, buf...)
+ }
stream.writeByte('"')
}
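The effect of the Encode change above: a nil []byte still serializes as null, but an empty, non-nil slice now serializes as "", matching encoding/json. A minimal sketch of the observable difference:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	nilJSON, _ := jsoniter.Marshal([]byte(nil)) // nil slice
	emptyJSON, _ := jsoniter.Marshal([]byte{})  // empty but non-nil
	fmt.Println(string(nilJSON))   // null
	fmt.Println(string(emptyJSON)) // ""
}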
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index 355d2d116..932641ac4 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -530,8 +530,8 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It
}
}
if fieldDecoder == nil {
- msg := "found unknown field: " + field
if decoder.disallowUnknownFields {
+ msg := "found unknown field: " + field
iter.ReportError("ReadObject", msg)
}
c := iter.nextToken()
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
index f318d2c59..826aa594a 100644
--- a/vendor/github.com/json-iterator/go/stream_float.go
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -1,6 +1,7 @@
package jsoniter
import (
+ "fmt"
"math"
"strconv"
)
@@ -13,6 +14,10 @@ func init() {
// WriteFloat32 write float32 to stream
func (stream *Stream) WriteFloat32(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
abs := math.Abs(float64(val))
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
@@ -26,6 +31,10 @@ func (stream *Stream) WriteFloat32(val float32) {
// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat32Lossy(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
if val < 0 {
stream.writeByte('-')
val = -val
@@ -54,6 +63,10 @@ func (stream *Stream) WriteFloat32Lossy(val float32) {
// WriteFloat64 write float64 to stream
func (stream *Stream) WriteFloat64(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
abs := math.Abs(val)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
@@ -67,6 +80,10 @@ func (stream *Stream) WriteFloat64(val float64) {
// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat64Lossy(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
if val < 0 {
stream.writeByte('-')
val = -val
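With the four guards above, writing NaN or ±Inf records an error on the stream instead of emitting tokens that are not valid JSON, in line with encoding/json rejecting such values. A minimal sketch of the visible behavior:

package main

import (
	"fmt"
	"math"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Both calls now fail instead of producing invalid JSON output.
	if _, err := jsoniter.Marshal(math.NaN()); err != nil {
		fmt.Println("NaN rejected:", err)
	}
	if _, err := jsoniter.Marshal(math.Inf(1)); err != nil {
		fmt.Println("+Inf rejected:", err)
	}
}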
diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/openshift/api/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go
new file mode 100644
index 000000000..4ff5208f2
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go
new file mode 100644
index 000000000..35eace370
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/register.go
@@ -0,0 +1,70 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &APIServer{},
+ &APIServerList{},
+ &Authentication{},
+ &AuthenticationList{},
+ &Build{},
+ &BuildList{},
+ &ClusterOperator{},
+ &ClusterOperatorList{},
+ &ClusterVersion{},
+ &ClusterVersionList{},
+ &Console{},
+ &ConsoleList{},
+ &DNS{},
+ &DNSList{},
+ &FeatureGate{},
+ &FeatureGateList{},
+ &Image{},
+ &ImageList{},
+ &Infrastructure{},
+ &InfrastructureList{},
+ &Ingress{},
+ &IngressList{},
+ &Network{},
+ &NetworkList{},
+ &OAuth{},
+ &OAuthList{},
+ &OperatorHub{},
+ &OperatorHubList{},
+ &Project{},
+ &ProjectList{},
+ &Proxy{},
+ &ProxyList{},
+ &Scheduler{},
+ &SchedulerList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
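
For orientation, a minimal sketch of consuming this registration API (not part of the vendored diff; package main and the printed check are illustrative, while Install, GroupVersion, and runtime.NewScheme are as defined above):

package main

import (
    "fmt"

    configv1 "github.com/openshift/api/config/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    // Install wires addKnownTypes into the scheme via the SchemeBuilder.
    if err := configv1.Install(scheme); err != nil {
        panic(err)
    }
    // The scheme now recognizes every kind registered above.
    gvk := configv1.GroupVersion.WithKind("ClusterVersion")
    fmt.Println(scheme.Recognizes(gvk)) // true
}
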
diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go
new file mode 100644
index 000000000..6a5718c1d
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/stringsource.go
@@ -0,0 +1,31 @@
+package v1
+
+import "encoding/json"
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// If the value is a string, it sets the Value field of the StringSource.
+// Otherwise, it is unmarshaled into the StringSourceSpec struct.
+func (s *StringSource) UnmarshalJSON(value []byte) error {
+ // If we can unmarshal to a simple string, just set the value
+ var simpleValue string
+ if err := json.Unmarshal(value, &simpleValue); err == nil {
+ s.Value = simpleValue
+ return nil
+ }
+
+ // Otherwise do the full struct unmarshal
+ return json.Unmarshal(value, &s.StringSourceSpec)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
+// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
+func (s *StringSource) MarshalJSON() ([]byte, error) {
+ // If we have only a cleartext value set, do a simple string marshal
+ if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
+ return json.Marshal(s.Value)
+ }
+
+ // Otherwise do the full struct marshal of the externalized bits
+ return json.Marshal(s.StringSourceSpec)
+}
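
A small round-trip sketch of the dual JSON form these two methods implement (not part of the diff; the literal values are invented):

package main

import (
    "encoding/json"
    "fmt"

    configv1 "github.com/openshift/api/config/v1"
)

func main() {
    // A plain JSON string unmarshals into Value.
    var s configv1.StringSource
    _ = json.Unmarshal([]byte(`"hunter2"`), &s)
    fmt.Println(s.Value) // hunter2

    // With only Value set, marshaling produces a bare JSON string again.
    b, _ := json.Marshal(&s)
    fmt.Println(string(b)) // "hunter2"

    // Once an external reference is set, the full object form is used.
    s.File = "/etc/secrets/value"
    b, _ = json.Marshal(&s)
    fmt.Println(string(b)) // {"value":"hunter2","env":"","file":"/etc/secrets/value","keyFile":""}
}
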
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
new file mode 100644
index 000000000..ca36f6777
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -0,0 +1,310 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ConfigMapFileReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapFileReference struct {
+ Name string `json:"name"`
+ // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
+ Key string `json:"key,omitempty"`
+}
+
+// ConfigMapNameReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapNameReference struct {
+ // name is the metadata.name of the referenced config map
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
+
+// SecretNameReference references a secret in a specific namespace.
+// The namespace must be specified at the point of use.
+type SecretNameReference struct {
+ // name is the metadata.name of the referenced secret
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
+
+// HTTPServingInfo holds configuration for serving HTTP
+type HTTPServingInfo struct {
+ // ServingInfo is the HTTP serving information
+ ServingInfo `json:",inline"`
+ // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
+ MaxRequestsInFlight int64 `json:"maxRequestsInFlight"`
+ // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if
+ // -1, there is no limit on requests.
+ RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"`
+}
+
+// ServingInfo holds information about serving web pages
+type ServingInfo struct {
+ // BindAddress is the ip:port to serve on
+ BindAddress string `json:"bindAddress"`
+ // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
+ // "tcp4", and "tcp6"
+ BindNetwork string `json:"bindNetwork"`
+ // CertInfo is the TLS cert info for serving secure traffic.
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+ // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
+ // +optional
+ ClientCA string `json:"clientCA,omitempty"`
+ // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
+ NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"`
+ // MinTLSVersion is the minimum TLS version supported.
+ // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
+ MinTLSVersion string `json:"minTLSVersion,omitempty"`
+ // CipherSuites contains an overridden list of ciphers for the server to support.
+ // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
+ CipherSuites []string `json:"cipherSuites,omitempty"`
+}
+
+// CertInfo relates a certificate with a private key
+type CertInfo struct {
+ // CertFile is a file containing a PEM-encoded certificate
+ CertFile string `json:"certFile"`
+ // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
+ KeyFile string `json:"keyFile"`
+}
+
+// NamedCertificate specifies a certificate/key, and the names it should be served for
+type NamedCertificate struct {
+ // Names is a list of DNS names this certificate should be used to secure
+ // A name can be a normal DNS name, or can contain leading wildcard segments.
+ Names []string `json:"names,omitempty"`
+ // CertInfo is the TLS cert info for serving secure traffic
+ CertInfo `json:",inline"`
+}
+
+// LeaderElection provides information to elect a leader
+type LeaderElection struct {
+ // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case.
+ Disable bool `json:"disable,omitempty"`
+ // namespace indicates which namespace the resource is in
+ Namespace string `json:"namespace,omitempty"`
+ // name indicates what name to use for the resource
+ Name string `json:"name,omitempty"`
+
+ // leaseDuration is the duration that non-leader candidates will wait
+ // after observing a leadership renewal until attempting to acquire
+ // leadership of a led but unrenewed leader slot. This is effectively the
+ // maximum duration that a leader can be stopped before it is replaced
+ // by another candidate. This is only applicable if leader election is
+ // enabled.
+ // +nullable
+ LeaseDuration metav1.Duration `json:"leaseDuration"`
+ // renewDeadline is the interval between attempts by the acting master to
+ // renew a leadership slot before it stops leading. This must be less
+ // than or equal to the lease duration. This is only applicable if leader
+ // election is enabled.
+ // +nullable
+ RenewDeadline metav1.Duration `json:"renewDeadline"`
+ // retryPeriod is the duration the clients should wait between attempting
+ // acquisition and renewal of a leadership. This is only applicable if
+ // leader election is enabled.
+ // +nullable
+ RetryPeriod metav1.Duration `json:"retryPeriod"`
+}
+
+// StringSource allows specifying a string inline, or externally via env var or file.
+// When it contains only a string value, it marshals to a simple JSON string.
+type StringSource struct {
+ // StringSourceSpec specifies the string value, or external location
+ StringSourceSpec `json:",inline"`
+}
+
+// StringSourceSpec specifies a string value, or external location
+type StringSourceSpec struct {
+ // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
+ Value string `json:"value"`
+
+ // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
+ Env string `json:"env"`
+
+ // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
+ File string `json:"file"`
+
+ // KeyFile references a file containing the key to use to decrypt the value.
+ KeyFile string `json:"keyFile"`
+}
+
+// RemoteConnectionInfo holds information necessary for establishing a remote connection
+type RemoteConnectionInfo struct {
+ // URL is the remote URL to connect to
+ URL string `json:"url"`
+ // CA is the CA for verifying TLS connections
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information to present
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+type AdmissionConfig struct {
+ PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"`
+
+ // enabledPlugins is a list of admission plugins that must be on in addition to the default list.
+ // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon
+ // and can result in performance penalties and unexpected behavior.
+ EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"`
+
+ // disabledPlugins is a list of admission plugins that must be off. Putting something in this list
+ // is almost always a mistake and likely to result in cluster instability.
+ DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
+}
+
+// AdmissionPluginConfig holds the necessary configuration options for admission plugins
+type AdmissionPluginConfig struct {
+ // Location is the path to a configuration file that contains the plugin's
+ // configuration
+ Location string `json:"location"`
+
+ // Configuration is an embedded configuration object to be used as the plugin's
+ // configuration. If present, it will be used instead of the path to the configuration file.
+ // +nullable
+ Configuration runtime.RawExtension `json:"configuration"`
+}
+
+type LogFormatType string
+
+type WebHookModeType string
+
+const (
+ // LogFormatLegacy saves events in a 1-line text format.
+ LogFormatLegacy LogFormatType = "legacy"
+ // LogFormatJson saves events in structured JSON format.
+ LogFormatJson LogFormatType = "json"
+
+ // WebHookModeBatch indicates that the webhook should buffer audit events
+ // internally, sending batch updates either once a certain number of
+ // events have been received or a certain amount of time has passed.
+ WebHookModeBatch WebHookModeType = "batch"
+ // WebHookModeBlocking causes the webhook to block on every attempt to process
+ // a set of events. This causes requests to the API server to wait for a
+ // round trip to the external audit service before sending a response.
+ WebHookModeBlocking WebHookModeType = "blocking"
+)
+
+// AuditConfig holds configuration for the audit capabilities
+type AuditConfig struct {
+ // If this flag is set, the audit log will be printed in the logs.
+ // The log contains the method, user, and requested URL.
+ Enabled bool `json:"enabled"`
+ // All requests coming to the apiserver will be logged to this file.
+ AuditFilePath string `json:"auditFilePath"`
+ // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
+ MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
+ // Maximum number of old log files to retain.
+ MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
+ // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
+ MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
+
+ // PolicyFile is a path to the file that defines the audit policy configuration.
+ PolicyFile string `json:"policyFile"`
+ // PolicyConfiguration is an embedded policy configuration object to be used
+ // as the audit policy configuration. If present, it will be used instead of
+ // the path to the policy file.
+ // +nullable
+ PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
+
+ // Format of saved audits (legacy or json).
+ LogFormat LogFormatType `json:"logFormat"`
+
+ // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
+ WebHookKubeConfig string `json:"webHookKubeConfig"`
+ // Strategy for sending audit events (block or batch).
+ WebHookMode WebHookModeType `json:"webHookMode"`
+}
+
+// EtcdConnectionInfo holds information necessary for connecting to an etcd server
+type EtcdConnectionInfo struct {
+ // URLs are the URLs for etcd
+ URLs []string `json:"urls,omitempty"`
+ // CA is a file containing trusted roots for the etcd server certificates
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to etcd
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+type EtcdStorageConfig struct {
+ EtcdConnectionInfo `json:",inline"`
+
+ // StoragePrefix is the path within etcd that the OpenShift resources will
+ // be rooted under. This value, if changed, will mean existing objects in etcd will
+ // no longer be located.
+ StoragePrefix string `json:"storagePrefix"`
+}
+
+// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
+type GenericAPIServerConfig struct {
+ // servingInfo describes how to start serving
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // corsAllowedOrigins
+ CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
+
+ // auditConfig describes how to configure audit information
+ AuditConfig AuditConfig `json:"auditConfig"`
+
+ // storageConfig contains information about how to use
+ StorageConfig EtcdStorageConfig `json:"storageConfig"`
+
+ // admissionConfig holds information about how to configure admission.
+ AdmissionConfig AdmissionConfig `json:"admission"`
+
+ KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
+}
+
+type KubeClientConfig struct {
+ // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster config.
+ KubeConfig string `json:"kubeConfig"`
+
+ // connectionOverrides specifies client overrides for system components to loop back to this master.
+ ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
+}
+
+type ClientConnectionOverrides struct {
+ // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
+ // default value of 'application/json'. This field will control all connections to the server used by a particular
+ // client.
+ AcceptContentTypes string `json:"acceptContentTypes"`
+ // contentType is the content type used when sending data to the server from this client.
+ ContentType string `json:"contentType"`
+
+ // qps controls the number of queries per second allowed for this connection.
+ QPS float32 `json:"qps"`
+ // burst allows extra queries to accumulate when a client is exceeding its rate.
+ Burst int32 `json:"burst"`
+}
+
+// GenericControllerConfig provides information to configure a controller
+type GenericControllerConfig struct {
+ // ServingInfo is the HTTP serving information for the controller's endpoints
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // leaderElection provides information to elect a leader. Only override this if you have a specific need
+ LeaderElection LeaderElection `json:"leaderElection"`
+
+ // authentication allows configuration of authentication for the endpoints
+ Authentication DelegatedAuthentication `json:"authentication"`
+ // authorization allows configuration of authentication for the endpoints
+ Authorization DelegatedAuthorization `json:"authorization"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+ // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+ // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
+ Disabled bool `json:"disabled,omitempty"`
+}
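
As a sketch of how these inline-able types are populated (not part of the diff; all names and durations below are illustrative):

package main

import (
    "fmt"
    "time"

    configv1 "github.com/openshift/api/config/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // The three durations are wrapped in metav1.Duration so they serialize
    // as human-readable strings (e.g. "26s") rather than nanosecond integers.
    le := configv1.LeaderElection{
        Namespace:     "openshift-config",        // illustrative
        Name:          "example-controller-lock", // illustrative
        LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
        RenewDeadline: metav1.Duration{Duration: 107 * time.Second}, // must be <= leaseDuration
        RetryPeriod:   metav1.Duration{Duration: 26 * time.Second},
    }
    fmt.Println(le.LeaseDuration.Duration > le.RenewDeadline.Duration) // true
}
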
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
new file mode 100644
index 000000000..ea76aec02
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -0,0 +1,75 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIServer holds configuration (like serving certificates, client CA and CORS domains)
+// shared by all API servers in the system, among them especially kube-apiserver
+// and openshift-apiserver. The canonical name of an instance is 'cluster'.
+type APIServer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // +kubebuilder:validation:Required
+ // +required
+ Spec APIServerSpec `json:"spec"`
+ // +optional
+ Status APIServerStatus `json:"status"`
+}
+
+type APIServerSpec struct {
+ // servingCerts is the TLS cert info for serving secure traffic. If not specified, operator managed certificates
+ // will be used for serving secure traffic.
+ // +optional
+ ServingCerts APIServerServingCerts `json:"servingCerts"`
+ // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for
+ // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid.
+ // You usually only have to set this if you have your own PKI you wish to honor client certificates from.
+ // The ConfigMap must exist in the openshift-config namespace and contain the following required fields:
+ // - ConfigMap.Data["ca-bundle.crt"] - CA bundle.
+ // +optional
+ ClientCA ConfigMapNameReference `json:"clientCA"`
+ // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the
+ // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth
+ // server from JavaScript applications.
+ // The values are regular expressions that correspond to the Golang regular expression language.
+ // +optional
+ AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+}
+
+type APIServerServingCerts struct {
+ // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
+ // If no named certificates are provided, or no named certificates match the server name as understood by a client,
+ // the defaultServingCertificate will be used.
+ // +optional
+ NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
+}
+
+// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
+type APIServerNamedServingCert struct {
+ // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
+ // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
+ // Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
+ // +optional
+ Names []string `json:"names,omitempty"`
+ // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
+ // The secret must exist in the openshift-config namespace and contain the following required fields:
+ // - Secret.Data["tls.key"] - TLS private key.
+ // - Secret.Data["tls.crt"] - TLS certificate.
+ ServingCertificate SecretNameReference `json:"servingCertificate"`
+}
+
+type APIServerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type APIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []APIServer `json:"items"`
+}
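
A hypothetical APIServer object wiring a named serving certificate to a secret in openshift-config (not part of the diff; the secret and host names are invented):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // The canonical singleton is named "cluster".
    apiserver := configv1.APIServer{
        ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
        Spec: configv1.APIServerSpec{
            ServingCerts: configv1.APIServerServingCerts{
                NamedCertificates: []configv1.APIServerNamedServingCert{{
                    Names:              []string{"api.example.com"}, // illustrative
                    ServingCertificate: configv1.SecretNameReference{Name: "custom-api-cert"},
                }},
            },
        },
    }
    _ = apiserver
}
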
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
new file mode 100644
index 000000000..0722ddbfc
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go
@@ -0,0 +1,120 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Authentication specifies cluster-wide settings for authentication (like OAuth and
+// webhook token authenticators). The canonical name of an instance is `cluster`.
+type Authentication struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec AuthenticationSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status AuthenticationStatus `json:"status"`
+}
+
+type AuthenticationSpec struct {
+ // type identifies the cluster managed, user facing authentication mode in use.
+ // Specifically, it manages the component that responds to login attempts.
+ // The default is IntegratedOAuth.
+ // +optional
+ Type AuthenticationType `json:"type"`
+
+ // oauthMetadata contains the discovery endpoint data for OAuth 2.0
+ // Authorization Server Metadata for an external OAuth server.
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // If oauthMetadata.name is non-empty, this value has precedence
+ // over any metadata reference stored in status.
+ // The key "oauthMetadata" is used to locate the data.
+ // If specified and the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"`
+
+ // webhookTokenAuthenticators configures remote token reviewers.
+ // These remote authentication webhooks can be used to verify bearer tokens
+ // via the tokenreviews.authentication.k8s.io REST API. This is required to
+ // honor bearer tokens that are provisioned by an external authentication service.
+ // The namespace for these secrets is openshift-config.
+ // +optional
+ WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"`
+}
+
+type AuthenticationStatus struct {
+ // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0
+ // Authorization Server Metadata for the in-cluster integrated OAuth server.
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // This contains the observed value based on cluster state.
+ // An explicitly set value in spec.oauthMetadata has precedence over this field.
+ // This field has no meaning if authentication spec.type is not set to IntegratedOAuth.
+ // The key "oauthMetadata" is used to locate the data.
+ // If the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config-managed.
+ IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"`
+
+ // TODO if we add support for an in-cluster operator managed Keycloak instance
+ // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type AuthenticationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Authentication `json:"items"`
+}
+
+type AuthenticationType string
+
+const (
+ // None means that no cluster managed authentication system is in place.
+ // Note that user login will only work if a manually configured system is in place and
+ // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators.
+ AuthenticationTypeNone AuthenticationType = "None"
+
+ // IntegratedOAuth refers to the cluster managed OAuth server.
+ // It is configured via the top level OAuth config.
+ AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth"
+
+ // TODO if we add support for an in-cluster operator managed Keycloak instance
+ // AuthenticationTypeKeycloak AuthenticationType = "Keycloak"
+)
+
+// WebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator
+type WebhookTokenAuthenticator struct {
+ // kubeConfig contains kube config file data which describes how to access the remote webhook service.
+ // For further details, see:
+ // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ // The key "kubeConfig" is used to locate the data.
+ // If the secret or expected key is not found, the webhook is not honored.
+ // If the specified kube config data is not valid, the webhook is not honored.
+ // The namespace for this secret is determined by the point of use.
+ KubeConfig SecretNameReference `json:"kubeConfig"`
+}
+
+const (
+ // OAuthMetadataKey is the key for the oauth authorization server metadata
+ OAuthMetadataKey = "oauthMetadata"
+
+ // KubeConfigKey is the key for the kube config file data in a secret
+ KubeConfigKey = "kubeConfig"
+)
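
A sketch combining the integrated OAuth server with an external webhook token authenticator (not part of the diff; the secret name is invented):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
)

func main() {
    // Keep the cluster managed OAuth server, but also honor bearer tokens
    // minted by an external service via a kubeconfig stored in openshift-config.
    spec := configv1.AuthenticationSpec{
        Type: configv1.AuthenticationTypeIntegratedOAuth,
        WebhookTokenAuthenticators: []configv1.WebhookTokenAuthenticator{
            {KubeConfig: configv1.SecretNameReference{Name: "example-webhook-kubeconfig"}}, // illustrative
        },
    }
    _ = spec
}
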
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
new file mode 100644
index 000000000..c7ed7e958
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -0,0 +1,101 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`
+type Build struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Spec holds user-settable values for the build controller configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec BuildSpec `json:"spec"`
+}
+
+type BuildSpec struct {
+ // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted for image pushes and pulls during builds.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+ // BuildDefaults controls the default information for Builds
+ // +optional
+ BuildDefaults BuildDefaults `json:"buildDefaults"`
+ // BuildOverrides controls override settings for builds
+ // +optional
+ BuildOverrides BuildOverrides `json:"buildOverrides"`
+}
+
+type BuildDefaults struct {
+ // DefaultProxy contains the default proxy settings for all build operations, including image pull/push
+ // and source download.
+ //
+ // Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
+ // in the build config's strategy.
+ // +optional
+ DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
+
+ // GitProxy contains the proxy settings for git operations only. If set, this will override
+ // any Proxy settings for all git commands, such as git clone.
+ //
+ // Values that are not set here will be inherited from DefaultProxy.
+ // +optional
+ GitProxy *ProxySpec `json:"gitProxy,omitempty"`
+
+ // Env is a set of default environment variables that will be applied to the
+ // build if the specified variables do not exist on the build
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
+
+ // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // User can override a default label by providing a label with the same name in their
+ // Build/BuildConfig.
+ // +optional
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+ // Resources defines resource requirements to execute the build.
+ // +optional
+ Resources corev1.ResourceRequirements `json:"resources"`
+}
+
+type ImageLabel struct {
+ // Name defines the name of the label. It must have non-zero length.
+ Name string `json:"name"`
+
+ // Value defines the literal value of the label.
+ // +optional
+ Value string `json:"value,omitempty"`
+}
+
+type BuildOverrides struct {
+ // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // If a user provides a label in their Build/BuildConfig with the same name as one in this
+ // list, the user's label will be overwritten.
+ // +optional
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+ // NodeSelector is a selector which must be true for the build pod to fit on a node
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // Tolerations is a list of Tolerations that will override any existing
+ // tolerations set on a build pod.
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type BuildList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Build `json:"items"`
+}
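
A sketch of a Build configuration exercising both defaults and overrides (not part of the diff; all values are illustrative):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
    corev1 "k8s.io/api/core/v1"
)

func main() {
    spec := configv1.BuildSpec{
        BuildDefaults: configv1.BuildDefaults{
            // Injected unless the build already defines the variable itself.
            Env: []corev1.EnvVar{{Name: "GIT_SSL_NO_VERIFY", Value: "false"}},
            // Applied to every resulting image unless overridden per build.
            ImageLabels: []configv1.ImageLabel{{Name: "vendor", Value: "example"}},
        },
        BuildOverrides: configv1.BuildOverrides{
            // Forces build pods onto matching nodes; keys/values are illustrative.
            NodeSelector: map[string]string{"node-role.kubernetes.io/worker": ""},
        },
    }
    _ = spec
}
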
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
new file mode 100644
index 000000000..af2ce846c
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -0,0 +1,150 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOperator is the Custom Resource object which holds the current state
+// of an operator. This object is used by operators to convey their state to
+// the rest of the cluster.
+type ClusterOperator struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec hold the intent of how this operator should behave.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ClusterOperatorSpec `json:"spec"`
+
+ // status holds the information about the state of an operator. It is consistent with status information across
+ // the kube ecosystem.
+ // +optional
+ Status ClusterOperatorStatus `json:"status"`
+}
+
+// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
+type ClusterOperatorSpec struct {
+}
+
+// ClusterOperatorStatus provides information about the status of the operator.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatus struct {
+ // conditions describes the state of the operator's reconciliation functionality.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple
+ // entries in the array. If an operator is Available, it must have at least one entry. You must report the version of
+ // the operator itself with the name "operator".
+ // +optional
+ Versions []OperandVersion `json:"versions,omitempty"`
+
+ // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
+ // 1. the detailed resource driving the operator
+ // 2. operator namespaces
+ // 3. operand namespaces
+ // +optional
+ RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
+
+ // extension contains any additional status information specific to the
+ // operator which owns this status object.
+ // +nullable
+ // +optional
+ Extension runtime.RawExtension `json:"extension"`
+}
+
+type OperandVersion struct {
+ // name is the name of the particular operand this version is for. It usually matches container images, not operators.
+ Name string `json:"name"`
+
+ // version indicates which version of a particular operand is currently being managed. It must always match the Available
+ // condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out
+ // 1.1.0
+ Version string `json:"version"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+ // group of the referent.
+ Group string `json:"group"`
+ // resource of the referent.
+ Resource string `json:"resource"`
+ // namespace of the referent.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+ // name of the referent.
+ Name string `json:"name"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ClusterOperatorStatusCondition represents the state of the operator's
+// reconciliation functionality.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatusCondition struct {
+ // type specifies the state of the operator's reconciliation functionality.
+ Type ClusterStatusConditionType `json:"type"`
+
+ // status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status"`
+
+ // lastTransitionTime is the time of the last update to the current status object.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+
+ // reason is the reason for the condition's last transition. Reasons are CamelCase
+ Reason string `json:"reason,omitempty"`
+
+ // message provides additional information about the current condition.
+ // This is only to be consumed by humans.
+ Message string `json:"message,omitempty"`
+}
+
+// ClusterStatusConditionType is the state of the operator's reconciliation functionality.
+type ClusterStatusConditionType string
+
+const (
+ // Available indicates that the binary maintained by the operator (eg: openshift-apiserver for the
+ // openshift-apiserver-operator), is functional and available in the cluster.
+ OperatorAvailable ClusterStatusConditionType = "Available"
+
+ // Progressing indicates that the operator is actively making changes to the binary maintained by the
+ // operator (eg: openshift-apiserver for the openshift-apiserver-operator).
+ OperatorProgressing ClusterStatusConditionType = "Progressing"
+
+ // Degraded indicates that the operand is not functioning completely. An example of a degraded state
+ // would be if there should be 5 copies of the operand running but only 4 are running. It may still be available,
+ // but it is degraded
+ OperatorDegraded ClusterStatusConditionType = "Degraded"
+
+ // Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`
+ // administrators should not upgrade their cluster and the message field should contain a human readable description
+ // of what the administrator should do to allow the operator to successfully update. A missing condition, True,
+ // and Unknown are all treated by the CVO as allowing an upgrade.
+ OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
+)
+
+// ClusterOperatorList is a list of OperatorStatus resources.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterOperatorList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterOperator `json:"items"`
+}
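
A sketch of an operator reporting status with these types (not part of the diff; the version and messages are invented):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    status := configv1.ClusterOperatorStatus{
        Conditions: []configv1.ClusterOperatorStatusCondition{{
            Type:               configv1.OperatorAvailable,
            Status:             configv1.ConditionTrue,
            Reason:             "AsExpected", // reasons are CamelCase
            Message:            "all replicas are ready",
            LastTransitionTime: metav1.Now(),
        }},
        // An Available operator must report at least the "operator" version.
        Versions: []configv1.OperandVersion{{Name: "operator", Version: "4.2.0"}}, // illustrative
    }
    _ = status
}
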
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
new file mode 100644
index 000000000..c6c2e7e43
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -0,0 +1,237 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
+// parameters related to automatic updates can be set.
+type ClusterVersion struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the desired state of the cluster version - the operator will work
+ // to ensure that the desired version is applied to the cluster.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ClusterVersionSpec `json:"spec"`
+ // status contains information about the available updates and any in-progress
+ // updates.
+ // +optional
+ Status ClusterVersionStatus `json:"status"`
+}
+
+// ClusterVersionSpec is the desired version state of the cluster. It includes
+// the version the cluster should be at, how the cluster is identified, and
+// where the cluster should look for version updates.
+// +k8s:deepcopy-gen=true
+type ClusterVersionSpec struct {
+ // clusterID uniquely identifies this cluster. This is expected to be
+ // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+ // hexadecimal values). This is a required field.
+ ClusterID ClusterID `json:"clusterID"`
+
+ // desiredUpdate is an optional field that indicates the desired value of
+ // the cluster version. Setting this value will trigger an upgrade (if
+ // the current version does not match the desired version). The set of
+ // recommended update values is listed as part of available updates in
+ // status, and setting values outside that range may cause the upgrade
+ // to fail. You may specify the version field without setting image if
+ // an update exists with that version in the availableUpdates or history.
+ //
+ // If an upgrade fails the operator will halt and report status
+ // about the failing component. Setting the desired update value back to
+ // the previous version will cause a rollback to be attempted. Not all
+ // rollbacks will succeed.
+ //
+ // +optional
+ DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
+
+ // upstream may be used to specify the preferred update server. By default
+ // it will use the appropriate update server for the cluster and region.
+ //
+ // +optional
+ Upstream URL `json:"upstream,omitempty"`
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
+ // overrides is a list of overrides for components that are managed by the
+ // cluster version operator. Marking a component unmanaged will prevent
+ // the operator from creating or updating the object.
+ // +optional
+ Overrides []ComponentOverride `json:"overrides,omitempty"`
+}
+
+// ClusterVersionStatus reports the status of the cluster versioning,
+// including any upgrades that are in progress. The current field will
+// be set to whichever version the cluster is reconciling to, and the
+// conditions array will report whether the update succeeded, is in
+// progress, or is failing.
+// +k8s:deepcopy-gen=true
+type ClusterVersionStatus struct {
+ // desired is the version that the cluster is reconciling towards.
+ // If the cluster is not yet fully initialized, desired will be set
+ // with the information available, which may be an image or a tag.
+ Desired Update `json:"desired"`
+
+ // history contains a list of the most recent versions applied to the cluster.
+ // This value may be empty during cluster startup, and then will be updated
+ // when a new update is being applied. The newest update is first in the
+ // list and it is ordered by recency. Updates in the history have state
+ // Completed if the rollout completed - if an update was failing or halfway
+ // applied the state will be Partial. Only a limited amount of update history
+ // is preserved.
+ // +optional
+ History []UpdateHistory `json:"history,omitempty"`
+
+ // observedGeneration reports which version of the spec is being synced.
+ // If this value is not equal to metadata.generation, then the desired
+ // and conditions fields may represent a previous version.
+ ObservedGeneration int64 `json:"observedGeneration"`
+
+ // versionHash is a fingerprint of the content that the cluster will be
+ // updated with. It is used by the operator to avoid unnecessary work
+ // and is for internal use only.
+ VersionHash string `json:"versionHash"`
+
+ // conditions provides information about the cluster version. The condition
+ // "Available" is set to true if the desiredUpdate has been reached. The
+ // condition "Progressing" is set to true if an update is being applied.
+ // The condition "Degraded" is set to true if an update is currently blocked
+ // by a temporary or permanent error. Conditions are only valid for the
+ // current desiredUpdate when metadata.generation is equal to
+ // status.generation.
+ // +optional
+ Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"`
+
+ // availableUpdates contains the list of updates that are appropriate
+ // for this cluster. This list may be empty if no updates are recommended,
+ // if the update service is unavailable, or if an invalid channel has
+ // been specified.
+ // +nullable
+ AvailableUpdates []Update `json:"availableUpdates"`
+}
+
+// UpdateState is a constant representing whether an update was successfully
+// applied to the cluster or not.
+type UpdateState string
+
+const (
+ // CompletedUpdate indicates an update was successfully applied
+ // to the cluster (all resource updates were successful).
+ CompletedUpdate UpdateState = "Completed"
+ // PartialUpdate indicates an update was never completely applied
+ // or is currently being applied.
+ PartialUpdate UpdateState = "Partial"
+)
+
+// UpdateHistory is a single attempted update to the cluster.
+type UpdateHistory struct {
+ // state reflects whether the update was fully applied. The Partial state
+ // indicates the update is not fully applied, while the Completed state
+ // indicates the update was successfully rolled out at least once (all
+ // parts of the update successfully applied).
+ State UpdateState `json:"state"`
+
+ // startedTime is the time at which the update was started.
+ StartedTime metav1.Time `json:"startedTime"`
+ // completionTime, if set, is when the update was fully applied. The update
+ // that is currently being applied will have a null completion time.
+ // Completion time will always be set for entries that are not the current
+ // update (usually to the started time of the next update).
+ // +nullable
+ CompletionTime *metav1.Time `json:"completionTime"`
+
+ // version is a semantic version identifying the update version. If the
+ // requested image does not define a version, or if a failure occurs
+ // retrieving the image, this value may be empty.
+ //
+ // +optional
+ Version string `json:"version"`
+ // image is a container image location that contains the update. This value
+ // is always populated.
+ Image string `json:"image"`
+ // verified indicates whether the provided update was properly verified
+ // before it was installed. If this is false the cluster may not be trusted.
+ Verified bool `json:"verified"`
+}
+
+// ClusterID is a string RFC4122 UUID.
+type ClusterID string
+
+// ComponentOverride allows overriding cluster version operator's behavior
+// for a component.
+// +k8s:deepcopy-gen=true
+type ComponentOverride struct {
+ // kind identifies which object to override.
+ Kind string `json:"kind"`
+ // group identifies the API group that the kind is in.
+ Group string `json:"group"`
+
+ // namespace is the component's namespace. If the resource is cluster
+ // scoped, the namespace should be empty.
+ Namespace string `json:"namespace"`
+ // name is the component's name.
+ Name string `json:"name"`
+
+ // unmanaged controls whether the cluster version operator should stop managing the
+ // resources in this cluster.
+ // Default: false
+ Unmanaged bool `json:"unmanaged"`
+}
+
+// URL is a thin wrapper around string that ensures the string is a valid URL.
+type URL string
+
+// Update represents a release of the ClusterVersionOperator, referenced by the
+// Image member.
+// +k8s:deepcopy-gen=true
+type Update struct {
+ // version is a semantic version identifying the update version. When this
+ // field is part of spec, version is optional if image is specified.
+ //
+ // +optional
+ Version string `json:"version"`
+ // image is a container image location that contains the update. When this
+ // field is part of spec, image is optional if version is specified and the
+ // availableUpdates field contains a matching version.
+ //
+ // +optional
+ Image string `json:"image"`
+ // force allows an administrator to update to an image that has failed
+ // verification, does not appear in the availableUpdates list, or otherwise
+ // would be blocked by normal protections on update. This option should only
+ // be used when the authenticity of the provided image has been verified out
+ // of band because the provided image will run with full administrative access
+ // to the cluster. Do not use this flag with images that come from unknown
+ // or potentially malicious sources.
+ //
+ // This flag does not override other forms of consistency checking that are
+ // required before a new update is deployed.
+ //
+ // +optional
+ Force bool `json:"force"`
+}
+
+// RetrievedUpdates reports whether available updates have been retrieved from
+// the upstream update server. The condition is Unknown before retrieval, False
+// if the updates could not be retrieved or recently failed, or True if the
+// availableUpdates field is accurate and recent.
+const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
+
+// ClusterVersionList is a list of ClusterVersion resources.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterVersionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterVersion `json:"items"`
+}
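
A sketch of requesting an update through ClusterVersionSpec (not part of the diff; UUID, channel, and version are invented):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
)

func main() {
    spec := configv1.ClusterVersionSpec{
        ClusterID: configv1.ClusterID("27af9dbe-0000-0000-0000-000000000000"), // illustrative UUID
        Channel:   "stable-4.2",                                               // illustrative channel
        // Requesting a version listed in availableUpdates; image may be omitted.
        DesiredUpdate: &configv1.Update{Version: "4.2.1"},
    }
    _ = spec
}
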
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
new file mode 100644
index 000000000..9cda3f83b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -0,0 +1,63 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Console holds cluster-wide configuration for the web console, including the
+// logout URL, and reports the public URL of the console. The canonical name is
+// `cluster`.
+type Console struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ConsoleSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ConsoleStatus `json:"status"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+ // +optional
+ Authentication ConsoleAuthentication `json:"authentication"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
+type ConsoleStatus struct {
+ // The URL for the console. This will be derived from the host for the route that
+ // is created for the console.
+ ConsoleURL string `json:"consoleURL"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ConsoleList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Console `json:"items"`
+}
+
+// ConsoleAuthentication defines a list of optional configuration for console authentication.
+type ConsoleAuthentication struct {
+ // An optional, absolute URL to redirect web browsers to after logging out of
+ // the console. If not specified, it will redirect to the default login page.
+ // This is required when using an identity provider that supports single
+ // sign-on (SSO) such as:
+ // - OpenID (Keycloak, Azure)
+ // - RequestHeader (GSSAPI, SSPI, SAML)
+ // - OAuth (GitHub, GitLab, Google)
+ // Logging out of the console will destroy the user's token. The logoutRedirect
+ // provides the user the option to perform single logout (SLO) through the identity
+ // provider to destroy their single sign-on session.
+ // +optional
+ // +kubebuilder:validation:Pattern=^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
+ LogoutRedirect string `json:"logoutRedirect,omitempty"`
+}
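
A sketch of configuring a logout redirect (not part of the diff; the URL is invented and must satisfy the https pattern above):

package main

import (
    configv1 "github.com/openshift/api/config/v1"
)

func main() {
    spec := configv1.ConsoleSpec{
        Authentication: configv1.ConsoleAuthentication{
            // Sends browsers to the identity provider's single-logout endpoint
            // after the console token is destroyed.
            LogoutRedirect: "https://sso.example.com/logout", // illustrative
        },
    }
    _ = spec
}
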
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
new file mode 100644
index 000000000..ef04f7a67
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -0,0 +1,88 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
+type DNS struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec DNSSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status DNSStatus `json:"status"`
+}
+
+type DNSSpec struct {
+ // baseDomain is the base domain of the cluster. All managed DNS records will
+ // be sub-domains of this base.
+ //
+ // For example, given the base domain `openshift.example.com`, an API server
+ // DNS record may be created for `cluster-api.openshift.example.com`.
+ //
+ // Once set, this field cannot be changed.
+ BaseDomain string `json:"baseDomain"`
+ // publicZone is the location where all the DNS records that are publicly accessible to
+ // the internet exist.
+ //
+ // If this field is nil, no public records should be created.
+ //
+ // Once set, this field cannot be changed.
+ //
+ // +optional
+ PublicZone *DNSZone `json:"publicZone,omitempty"`
+ // privateZone is the location where all the DNS records that are only available internally
+ // to the cluster exist.
+ //
+ // If this field is nil, no private records should be created.
+ //
+ // Once set, this field cannot be changed.
+ //
+ // +optional
+ PrivateZone *DNSZone `json:"privateZone,omitempty"`
+}
+
+// DNSZone is used to define a DNS hosted zone.
+// A zone can be identified by an ID or tags.
+type DNSZone struct {
+ // id is the identifier that can be used to find the DNS hosted zone.
+ //
+ // on AWS, the zone can be fetched by passing `ID` as the hosted zone id to [1];
+ // on Azure, the zone can be fetched by using `ID` as a pre-determined name in [2];
+ // on GCP, the zone can be fetched by using `ID` as a pre-determined name in [3].
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // tags can be used to query the DNS hosted zone.
+ //
+ // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+type DNSStatus struct {
+ // TODO: dnsSuffix (used by service-ca, among others)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []DNS `json:"items"`
+}
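
For illustration only, a short sketch (values hypothetical) of the two ways a DNSZone can be identified per the field docs above, by provider ID or by tags:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Identify a zone directly by its provider-specific ID...
	byID := configv1.DNSZone{ID: "Z3URY6TWQ91KVV"}
	// ...or by tags that a tagging API can resolve to a zone.
	byTags := configv1.DNSZone{Tags: map[string]string{
		"kubernetes.io/cluster/mycluster": "owned",
	}}
	fmt.Println(byID.ID, byTags.Tags)
}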
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
new file mode 100644
index 000000000..536bad191
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -0,0 +1,118 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`
+type FeatureGate struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec FeatureGateSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status FeatureGateStatus `json:"status"`
+}
+
+type FeatureSet string
+
+var (
+ // Default feature set that allows upgrades.
+ Default FeatureSet = ""
+
+ // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
+ // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+ TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
+
+ // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally
+ // apply invalid combinations, your cluster may fail in an unrecoverable way.
+ CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+)
+
+type FeatureGateSpec struct {
+ FeatureGateSelection `json:",inline"`
+}
+
+// +union
+type FeatureGateSelection struct {
+ // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
+ // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
+ // +unionDiscriminator
+ // +optional
+ FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+ // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally
+ // apply invalid combinations, your cluster may fail in an unrecoverable way. featureSet must equal
+ // "CustomNoUpgrade" to use this field.
+ // +optional
+ // +nullable
+ CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
+}
+
+type CustomFeatureGates struct {
+ // enabled is a list of all feature gates that you want to force on
+ // +optional
+ Enabled []string `json:"enabled,omitempty"`
+ // disabled is a list of all feature gates that you want to force off
+ // +optional
+ Disabled []string `json:"disabled,omitempty"`
+}
+
+type FeatureGateStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type FeatureGateList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []FeatureGate `json:"items"`
+}
+
+type FeatureGateEnabledDisabled struct {
+ Enabled []string
+ Disabled []string
+}
+
+// FeatureSets maps each FeatureSet name to the feature gates it enables and disables.
+//
+// NOTE: The caller needs to make sure to check for the existence of the value
+// using Go's comma-ok idiom. A possible scenario is an upgrade where new
+// FeatureSets are added and a controller has not been upgraded with a newer
+// version of this file. In that upgrade scenario the lookup could return nil.
+//
+// example:
+// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
+//
+// If you put an item in either of these lists, put your area and name on it so we can find owners.
+var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
+ Default: {
+ Enabled: []string{
+ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
+ "RotateKubeletServerCertificate", // sig-pod, sjenning
+ "SupportPodPidsLimit", // sig-pod, sjenning
+ },
+ Disabled: []string{
+ "LocalStorageCapacityIsolation", // sig-pod, sjenning
+ },
+ },
+ TechPreviewNoUpgrade: {
+ Enabled: []string{
+ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
+ "RotateKubeletServerCertificate", // sig-pod, sjenning
+ "SupportPodPidsLimit", // sig-pod, sjenning
+ },
+ Disabled: []string{
+ "LocalStorageCapacityIsolation", // sig-pod, sjenning
+ },
+ },
+}
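
The NOTE above is the important caveat for consumers of this map. A minimal sketch of the comma-ok lookup it recommends (the fallback behavior shown is an assumption, not mandated by the package):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Look up a feature set defensively: on version skew the key may not
	// exist yet, in which case gates is nil and ok is false.
	if gates, ok := configv1.FeatureSets[configv1.TechPreviewNoUpgrade]; ok {
		fmt.Println("enabled:", gates.Enabled)
		fmt.Println("disabled:", gates.Disabled)
	} else {
		fmt.Println("unknown feature set; treat as no opinion")
	}
}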
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
new file mode 100644
index 000000000..f0cf220d3
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -0,0 +1,111 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Image holds cluster-wide information about how to handle images. The canonical name is `cluster`
+type Image struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ImageStatus `json:"status"`
+}
+
+type ImageSpec struct {
+ // AllowedRegistriesForImport limits the container image registries that normal users may import
+ // images from. Set this list to the registries that you trust to contain valid Docker
+ // images and that you want applications to be able to import from. Users with
+ // permission to create Images or ImageStreamMappings via the API are not affected by
+ // this policy - typically only administrators or system integrations will have those
+ // permissions.
+ // +optional
+ AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"`
+
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+
+ // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted during imagestream import, pod image pull, and imageregistry
+ // pullthrough.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+
+ // RegistrySources contains configuration that determines how the container runtime
+ // should treat individual registries when accessing images for builds+pods. (e.g.
+ // whether or not to allow insecure access). It does not contain configuration for the
+ // internal cluster registry.
+ // +optional
+ RegistrySources RegistrySources `json:"registrySources"`
+}
+
+type ImageStatus struct {
+
+ // internalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format. It is set by the
+ // image registry operator, which controls the internal registry hostname.
+ // For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY
+ // environment variable, but this setting overrides the environment variable.
+ // +optional
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
+
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ImageList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Image `json:"items"`
+}
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // DomainName specifies a domain name for the registry.
+ // If the registry uses a non-standard port (other than 80 or 443), the port
+ // must be included in the domain name as well.
+ DomainName string `json:"domainName"`
+ // Insecure indicates whether the registry is secure (https) or insecure (http).
+ // By default (if not specified) the registry is assumed to be secure.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+// RegistrySources holds cluster-wide information about how to handle the registries config.
+type RegistrySources struct {
+ // InsecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.
+ // +optional
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ // BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ // AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+}
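
Since BlockedRegistries and AllowedRegistries are documented as mutually exclusive, a consumer-side validation sketch might look like the following (the helper is hypothetical, not part of this package):

package main

import (
	"errors"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// validateRegistrySources is a hypothetical helper enforcing the documented
// rule that only one of the two registry lists may be set.
func validateRegistrySources(s configv1.RegistrySources) error {
	if len(s.BlockedRegistries) > 0 && len(s.AllowedRegistries) > 0 {
		return errors.New("only one of blockedRegistries or allowedRegistries may be set")
	}
	return nil
}

func main() {
	bad := configv1.RegistrySources{
		BlockedRegistries: []string{"bad.example.com"},
		AllowedRegistries: []string{"good.example.com"},
	}
	fmt.Println(validateRegistrySources(bad)) // prints the validation error
}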
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
new file mode 100644
index 000000000..4632e6ada
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -0,0 +1,210 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec InfrastructureSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InfrastructureStatus `json:"status"`
+}
+
+// InfrastructureSpec contains settings that apply to the cluster infrastructure.
+type InfrastructureSpec struct {
+ // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
+ // This configuration file is used to configure the Kubernetes cloud provider integration
+ // when using the built-in cloud provider integration or the external cloud controller manager.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CloudConfig ConfigMapFileReference `json:"cloudConfig"`
+}
+
+// InfrastructureStatus describes the infrastructure the cluster is leveraging.
+type InfrastructureStatus struct {
+ // infrastructureName uniquely identifies a cluster with a human-friendly name.
+ // Once set, it should not be changed. It must be at most 27 characters long and
+ // contain only alphanumeric characters or hyphens.
+ InfrastructureName string `json:"infrastructureName"`
+
+ // platform is the underlying infrastructure provider for the cluster.
+ //
+ // Deprecated: Use platformStatus.type instead.
+ Platform PlatformType `json:"platform,omitempty"`
+
+ // platformStatus holds status information specific to the underlying
+ // infrastructure provider.
+ // +optional
+ PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
+
+ // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
+ // etcd servers and clients.
+ // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
+ EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`
+
+ // apiServerURL is a valid URI with scheme (http/https), address, and
+ // port. apiServerURL can be used by components like the web console
+ // to tell users where to find the Kubernetes API.
+ APIServerURL string `json:"apiServerURL"`
+
+ // apiServerInternalURL is a valid URI with scheme (http/https),
+ // address and port. apiServerInternalURL can be used by components
+ // like kubelets, to contact the Kubernetes API server using the
+ // infrastructure provider rather than Kubernetes networking.
+ APIServerInternalURL string `json:"apiServerInternalURI"`
+}
+
+// PlatformType is a specific supported infrastructure provider.
+type PlatformType string
+
+const (
+ // AWSPlatformType represents Amazon Web Services infrastructure.
+ AWSPlatformType PlatformType = "AWS"
+
+ // AzurePlatformType represents Microsoft Azure infrastructure.
+ AzurePlatformType PlatformType = "Azure"
+
+ // BareMetalPlatformType represents managed bare metal infrastructure.
+ BareMetalPlatformType PlatformType = "BareMetal"
+
+ // GCPPlatformType represents Google Cloud Platform infrastructure.
+ GCPPlatformType PlatformType = "GCP"
+
+ // LibvirtPlatformType represents libvirt infrastructure.
+ LibvirtPlatformType PlatformType = "Libvirt"
+
+ // OpenStackPlatformType represents OpenStack infrastructure.
+ OpenStackPlatformType PlatformType = "OpenStack"
+
+ // NonePlatformType means there is no infrastructure provider.
+ NonePlatformType PlatformType = "None"
+
+ // VSpherePlatformType represents VMware vSphere infrastructure.
+ VSpherePlatformType PlatformType = "VSphere"
+
+ // OvirtPlatformType represents oVirt/RHV infrastructure.
+ OvirtPlatformType PlatformType = "oVirt"
+)
+
+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at the status level for the underlying cluster,
+// only the status struct matching the platform type is expected to be set.
+type PlatformStatus struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", and "None". Individual components may not support
+ // all platforms, and must handle unrecognized platforms as None if they do
+ // not support that platform.
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformStatus `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformStatus `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformStatus `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+ // region holds the default AWS region for new AWS resources created by the cluster.
+ Region string `json:"region"`
+}
+
+// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
+type AzurePlatformStatus struct {
+ // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName"`
+}
+
+// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
+type GCPPlatformStatus struct {
+ // projectID is the Project ID for new GCP resources created for the cluster.
+ ProjectID string `json:"projectID"`
+
+ // region holds the region for new GCP resources created for the cluster.
+ Region string `json:"region"`
+}
+
+// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
+type BareMetalPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // BareMetal deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
+// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.
+type OpenStackPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // cloudName is the name of the desired OpenStack cloud in the
+ // client configuration file (`clouds.yaml`).
+ CloudName string `json:"cloudName,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // OpenStack deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InfrastructureList contains a list of Infrastructure.
+type InfrastructureList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Infrastructure `json:"items"`
+}
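
The PlatformStatus docs above require components to handle unrecognized platforms as None. A sketch of that rule in a consumer (the helper name and fallback are assumptions):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// regionFor is a hypothetical helper: it returns a provider region when one
// is known, and falls back to "" for None or unrecognized platforms.
func regionFor(status *configv1.PlatformStatus) string {
	if status == nil {
		return ""
	}
	switch status.Type {
	case configv1.AWSPlatformType:
		if status.AWS != nil {
			return status.AWS.Region
		}
	case configv1.GCPPlatformType:
		if status.GCP != nil {
			return status.GCP.Region
		}
	}
	// Unrecognized platform types are treated the same as None.
	return ""
}

func main() {
	fmt.Println(regionFor(&configv1.PlatformStatus{
		Type: configv1.AWSPlatformType,
		AWS:  &configv1.AWSPlatformStatus{Region: "us-east-1"},
	}))
}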
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
new file mode 100644
index 000000000..484a1af0b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -0,0 +1,42 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`
+// TODO this object is an example of a possible grouping and is subject to change or removal
+type Ingress struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec IngressSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status IngressStatus `json:"status"`
+}
+
+type IngressSpec struct {
+ // domain is used to generate a default host name for a route when the
+ // route's host name is empty. The generated host name will follow this
+ // pattern: "<route-name>.<route-namespace>.<domain>".
+ Domain string `json:"domain"`
+}
+
+type IngressStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type IngressList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Ingress `json:"items"`
+}
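
A tiny sketch of the host-name pattern the domain field feeds, as described in IngressSpec (the helper and example values are hypothetical):

package main

import "fmt"

// defaultRouteHost applies the documented pattern
// "<route-name>.<route-namespace>.<domain>" for a route with an empty host.
func defaultRouteHost(name, namespace, domain string) string {
	return fmt.Sprintf("%s.%s.%s", name, namespace, domain)
}

func main() {
	fmt.Println(defaultRouteHost("frontend", "shop", "apps.example.com"))
	// Output: frontend.shop.apps.example.com
}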
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
new file mode 100644
index 000000000..a60c5f7dc
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -0,0 +1,117 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Network holds cluster-wide information about Network. The canonical name is `cluster`
+// TODO this object is an example of a possible grouping and is subject to change or removal
+type Network struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec NetworkSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status NetworkStatus `json:"status"`
+}
+
+// NetworkSpec is the desired network configuration.
+// As a general rule, this SHOULD NOT be read directly. Instead, you should
+// consume the NetworkStatus, as it indicates the currently deployed configuration.
+// Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after
+// installation is not supported.
+type NetworkSpec struct {
+ // IP address pool to use for pod IPs.
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // IP address pool for services.
+ // Currently, we only support a single entry here.
+ ServiceNetwork []string `json:"serviceNetwork"`
+
+ // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
+ // This should match a value that the cluster-network-operator understands,
+ // or else no networking will be installed.
+ // Currently supported values are:
+ // - OpenShiftSDN
+ NetworkType string `json:"networkType"`
+
+ // externalIP defines configuration for controllers that
+ // affect Service.ExternalIP
+ // +optional
+ ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
+}
+
+// NetworkStatus is the current network configuration.
+type NetworkStatus struct {
+ // IP address pool to use for pod IPs.
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+ // IP address pool for services.
+ // Currently, we only support a single entry here.
+ ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+
+ // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
+ NetworkType string `json:"networkType,omitempty"`
+
+ // ClusterNetworkMTU is the MTU for inter-pod networking.
+ ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
+}
+
+// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
+// are allocated.
+type ClusterNetworkEntry struct {
+ // The complete block for pod IPs.
+ CIDR string `json:"cidr"`
+
+ // The size (prefix) of block to allocate to each node.
+ HostPrefix uint32 `json:"hostPrefix"`
+}
+
+// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
+// of a Service resource.
+type ExternalIPConfig struct {
+ // policy is a set of restrictions applied to the ExternalIP field.
+ // If nil, any value is allowed for an ExternalIP. If the empty/zero
+ // policy is supplied, then ExternalIP is not allowed to be set.
+ // +optional
+ Policy *ExternalIPPolicy `json:"policy,omitempty"`
+
+ // autoAssignCIDRs is a list of CIDRs from which to automatically assign
+ // Service.ExternalIP. These are assigned when the service is of type
+ // LoadBalancer. In general, this is only useful for bare-metal clusters.
+ // In OpenShift 3.x, this was misleadingly called "IngressIPs".
+ // Automatically assigned External IPs are not affected by any
+ // ExternalIPPolicy rules.
+ // Currently, only one entry may be provided.
+ // +optional
+ AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+ // allowedCIDRs is the list of allowed CIDRs.
+ AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+ // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+ // over allowedCIDRs.
+ // +optional
+ RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Network `json:"items"`
+}
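
For illustration, a sketch of a NetworkSpec with one pod CIDR carved into per-node blocks (all addresses and the plugin choice are hypothetical example values):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	spec := configv1.NetworkSpec{
		// Pods draw IPs from this block; each node gets a /23 slice of it.
		ClusterNetwork: []configv1.ClusterNetworkEntry{
			{CIDR: "10.128.0.0/14", HostPrefix: 23},
		},
		// Only a single service network entry is currently supported.
		ServiceNetwork: []string{"172.30.0.0/16"},
		NetworkType:    "OpenShiftSDN",
	}
	fmt.Println(spec.ClusterNetwork[0].CIDR)
}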
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
new file mode 100644
index 000000000..15bc5b1c1
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go
@@ -0,0 +1,557 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// OAuth Server and Identity Provider Config
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`.
+// It is used to configure the integrated OAuth server.
+// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
+type OAuth struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OAuthSpec `json:"spec"`
+ // +optional
+ Status OAuthStatus `json:"status"`
+}
+
+// OAuthSpec contains desired cluster auth configuration
+type OAuthSpec struct {
+ // identityProviders is an ordered list of ways for a user to identify themselves.
+ // When this list is empty, no identities are provisioned for users.
+ // +optional
+ IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
+
+ // tokenConfig contains options for authorization and access tokens
+ TokenConfig TokenConfig `json:"tokenConfig"`
+
+ // templates allow you to customize pages like the login page.
+ // +optional
+ Templates OAuthTemplates `json:"templates"`
+}
+
+// OAuthStatus shows current known state of OAuth server in the cluster
+type OAuthStatus struct {
+ // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+ // accessTokenMaxAgeSeconds defines the maximum age of access tokens
+ AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"`
+
+ // accessTokenInactivityTimeoutSeconds defines the default token
+ // inactivity timeout for tokens granted by any client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out.
+ // Valid values are:
+ // x < 0: token inactivity timeout is enabled, but tokens never time out unless
+ // configured per client (e.g. `-1`)
+ // x = 0: token inactivity timeout is disabled (default)
+ // x > 0: tokens time out if there is no activity for x seconds
+ // The current minimum allowed value for x is 300 (5 minutes)
+ // +optional
+ AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+}
+
+const (
+ // LoginTemplateKey is the key of the login template in a secret
+ LoginTemplateKey = "login.html"
+
+ // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
+ ProviderSelectionTemplateKey = "providers.html"
+
+ // ErrorsTemplateKey is the key for the errors template in a secret
+ ErrorsTemplateKey = "errors.html"
+
+ // BindPasswordKey is the key for the LDAP bind password in a secret
+ BindPasswordKey = "bindPassword"
+
+ // ClientSecretKey is the key for the oauth client secret data in a secret
+ ClientSecretKey = "clientSecret"
+
+ // HTPasswdDataKey is the key for the htpasswd file data in a secret
+ HTPasswdDataKey = "htpasswd"
+)
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+ // login is the name of a secret that specifies a go template to use to render the login page.
+ // The key "login.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default login page is used.
+ // If the specified template is not valid, the default login page is used.
+ // If unspecified, the default login page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ Login SecretNameReference `json:"login"`
+
+ // providerSelection is the name of a secret that specifies a go template to use to render
+ // the provider selection page.
+ // The key "providers.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default provider selection page is used.
+ // If the specified template is not valid, the default provider selection page is used.
+ // If unspecified, the default provider selection page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ ProviderSelection SecretNameReference `json:"providerSelection"`
+
+ // error is the name of a secret that specifies a go template to use to render error pages
+ // during the authentication or grant flow.
+ // The key "errors.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default error page is used.
+ // If the specified template is not valid, the default error page is used.
+ // If unspecified, the default error page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ Error SecretNameReference `json:"error"`
+}
+
+// IdentityProvider provides identities for users authenticating using credentials
+type IdentityProvider struct {
+ // name is used to qualify the identities returned by this provider.
+ // - It MUST be unique and not shared by any other identity provider used
+ // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":"
+ // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName
+ Name string `json:"name"`
+
+ // mappingMethod determines how identities from this provider are mapped to users
+ // Defaults to "claim"
+ // +optional
+ MappingMethod MappingMethodType `json:"mappingMethod,omitempty"`
+
+ IdentityProviderConfig `json:",inline"`
+}
+
+// MappingMethodType specifies how new identities should be mapped to users when they log in
+type MappingMethodType string
+
+const (
+ // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user
+ // with that user name is already mapped to another identity.
+ // Default.
+ MappingMethodClaim MappingMethodType = "claim"
+
+ // MappingMethodLookup looks up existing users already mapped to an identity but does not
+ // automatically provision users or identities. Requires identities and users be set up
+ // manually or using an external process.
+ MappingMethodLookup MappingMethodType = "lookup"
+
+ // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with
+ // that user name already exists, the identity is mapped to the existing user, adding to any
+ // existing identity mappings for the user.
+ MappingMethodAdd MappingMethodType = "add"
+)
+
+type IdentityProviderType string
+
+const (
+ // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
+ IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
+
+ // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
+ IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
+
+ // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
+ IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
+
+ // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
+ IdentityProviderTypeGoogle IdentityProviderType = "Google"
+
+ // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
+ IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
+
+ // IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
+ IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
+
+ // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
+ IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
+
+ // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
+ IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
+
+ // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
+ IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
+)
+
+// IdentityProviderConfig contains configuration for using a specific identity provider
+type IdentityProviderConfig struct {
+ // type identifies the identity provider type for this entry.
+ Type IdentityProviderType `json:"type"`
+
+ // Provider-specific configuration
+ // The json tag MUST match the `Type` specified above, case-insensitively
+ // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
+
+ // basicAuth contains configuration options for the BasicAuth IdP
+ // +optional
+ BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
+
+ // github enables user authentication using GitHub credentials
+ // +optional
+ GitHub *GitHubIdentityProvider `json:"github,omitempty"`
+
+ // gitlab enables user authentication using GitLab credentials
+ // +optional
+ GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
+
+ // google enables user authentication using Google credentials
+ // +optional
+ Google *GoogleIdentityProvider `json:"google,omitempty"`
+
+ // htpasswd enables user authentication using an HTPasswd file to validate credentials
+ // +optional
+ HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
+
+ // keystone enables user authentication using keystone password credentials
+ // +optional
+ Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
+
+ // ldap enables user authentication using LDAP credentials
+ // +optional
+ LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
+
+ // openID enables user authentication using OpenID credentials
+ // +optional
+ OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
+
+ // requestHeader enables user authentication using request header credentials
+ // +optional
+ RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
+}
+
+// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+type BasicAuthIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
+ OAuthRemoteConnectionInfo `json:",inline"`
+}
+
+// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
+type OAuthRemoteConnectionInfo struct {
+ // url is the remote URL to connect to
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // tlsClientCert is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS client certificate to present when connecting to the server.
+ // The key "tls.crt" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientCert SecretNameReference `json:"tlsClientCert"`
+
+ // tlsClientKey is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
+ // The key "tls.key" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+ // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+ // The key "htpasswd" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // If the specified htpasswd data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+ // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+ // The syntax of the URL is:
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+
+ // bindDN is an optional DN to bind with during the search phase.
+ // +optional
+ BindDN string `json:"bindDN"`
+
+ // bindPassword is an optional reference to a secret by name
+ // containing a password to bind with during the search phase.
+ // The key "bindPassword" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ BindPassword SecretNameReference `json:"bindPassword"`
+
+ // insecure, if true, indicates the connection should not use TLS.
+ // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+ // attempt to connect using TLS, even when `insecure` is set to `true`.
+ // When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
+ // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+ Insecure bool `json:"insecure"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // id is the list of attributes whose values should be used as the user ID. Required.
+ // The first non-empty attribute is used. At least one attribute is required. If none of the
+ // listed attributes have a value, authentication fails.
+ // The standard LDAP identity attribute is "dn"
+ ID []string `json:"id"`
+
+ // preferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +optional
+ Email []string `json:"email,omitempty"`
+}
+
+// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
+type KeystoneIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
+ OAuthRemoteConnectionInfo `json:",inline"`
+
+ // domainName is required for keystone v3
+ DomainName string `json:"domainName"`
+
+ // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
+ // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
+ // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
+ // +optional
+ // UseUsernameIdentity bool `json:"useUsernameIdentity"`
+}
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+type RequestHeaderIdentityProvider struct {
+ // loginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when login is set to true.
+ LoginURL string `json:"loginURL"`
+
+ // challengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
+ // redirected here.
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when challenge is set to true.
+ ChallengeURL string `json:"challengeURL"`
+
+ // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // Specifically, it allows verification of incoming requests to prevent header spoofing.
+ // The key "ca.crt" is used to locate the data.
+ // If the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // The namespace for this config map is openshift-config.
+ ClientCA ConfigMapNameReference `json:"ca"`
+
+ // clientCommonNames is an optional list of common names to require a match from. If empty, any
+ // client certificate validated against the clientCA bundle is considered authoritative.
+ // +optional
+ ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+ // headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+
+ // preferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+ // nameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+
+ // emailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // organizations optionally restricts which organizations are allowed to log in
+ // +optional
+ Organizations []string `json:"organizations,omitempty"`
+
+ // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
+ // +optional
+ Teams []string `json:"teams,omitempty"`
+
+ // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+ // GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+ // +optional
+ Hostname string `json:"hostname"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // This can only be configured when hostname is set to a non-empty value.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // url is the oauth server base URL
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GoogleIdentityProvider provides identities for users authenticating using Google credentials
+type GoogleIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
+ // +optional
+ HostedDomain string `json:"hostedDomain"`
+}
+
+// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
+type OpenIDIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // extraScopes are any scopes to request in addition to the standard "openid" scope.
+ // +optional
+ ExtraScopes []string `json:"extraScopes,omitempty"`
+
+ // extraAuthorizeParameters are any custom parameters to add to the authorize request.
+ // +optional
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"`
+
+ // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier.
+ // It must use the https scheme with no query or fragment component.
+ Issuer string `json:"issuer"`
+
+ // claims describes how OpenID claims map to identity fields
+ Claims OpenIDClaims `json:"claims"`
+}
+
+// UserIDClaim is the claim used to provide a stable identifier for OIDC identities.
+// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability
+// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can
+// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique
+// and never reassigned within the Issuer for a particular End-User, as described in Section 2.
+// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the
+// iss Claim and the sub Claim."
+const UserIDClaim = "sub"
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+ // preferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the sub claim
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of claims whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of claims whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +optional
+ Email []string `json:"email,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type OAuthList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []OAuth `json:"items"`
+}
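
The IdentityProviderConfig docs above require the populated provider field to match Type. A sketch of an HTPasswd provider wired up that way (the secret name and provider name are hypothetical):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	idp := configv1.IdentityProvider{
		Name:          "local-users",
		MappingMethod: configv1.MappingMethodClaim,
		IdentityProviderConfig: configv1.IdentityProviderConfig{
			// Type must match the populated provider field below.
			Type: configv1.IdentityProviderTypeHTPasswd,
			HTPasswd: &configv1.HTPasswdIdentityProvider{
				// Secret in openshift-config holding the "htpasswd" key.
				FileData: configv1.SecretNameReference{Name: "htpasswd-secret"},
			},
		},
	}
	fmt.Println(idp.Name, idp.Type)
}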
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
new file mode 100644
index 000000000..cf821f9e3
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -0,0 +1,69 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorHubSpec defines the desired state of OperatorHub
+type OperatorHubSpec struct {
+ // sources is the list of default hub sources and their configuration.
+ // If the list is empty, it indicates that the default hub sources are
+ // enabled on the cluster. The list of default hub sources and their
+ // current state will always be reflected in the status block.
+ // +optional
+ Sources []HubSource `json:"sources,omitempty"`
+}
+
+// OperatorHubStatus defines the observed state of OperatorHub. The current
+// state of the default hub sources will always be reflected here.
+type OperatorHubStatus struct {
+ // sources encapsulates the result of applying the configuration for each
+ // hub source
+ Sources []HubSourceStatus `json:"sources,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHub is the Schema for the operatorhubs API. It can be used to change
+// the state of the default hub sources for OperatorHub on the cluster from
+// enabled to disabled and vice versa.
+// +kubebuilder:subresource:status
+// +genclient:nonNamespaced
+type OperatorHub struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec OperatorHubSpec `json:"spec"`
+ Status OperatorHubStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHubList contains a list of OperatorHub
+type OperatorHubList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []OperatorHub `json:"items"`
+}
+
+// HubSource is used to specify the hub source and its configuration
+type HubSource struct {
+ // name is the name of one of the default hub sources
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+ // disabled is used to disable a default hub source on cluster
+ // +kubebuilder:validation:Required
+ Disabled bool `json:"disabled"`
+}
+
+// HubSourceStatus is used to reflect the current state of applying the
+// configuration to a default source
+type HubSourceStatus struct {
+ HubSource
+ // status indicates success or failure in applying the configuration
+ Status string `json:"status"`
+ // message provides more information regarding failures
+ Message string `json:"message,omitempty"`
+}
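+
+// An illustrative manifest (editorial, not part of the upstream API): an
+// OperatorHub resource that disables one default hub source. The source name
+// "community-operators" is an assumption made for the example; the actual
+// default source names are whatever the cluster reports in status.sources:
+//
+//   apiVersion: config.openshift.io/v1
+//   kind: OperatorHub
+//   metadata:
+//     name: cluster
+//   spec:
+//     sources:
+//     - name: community-operators
+//       disabled: true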
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
new file mode 100644
index 000000000..61152a6f7
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -0,0 +1,55 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds cluster-wide information about Project. The canonical name is `cluster`
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProjectSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ProjectStatus `json:"status"`
+}
+
+// TemplateReference references a template in a specific namespace.
+// The namespace must be specified at the point of use.
+type TemplateReference struct {
+ // name is the metadata.name of the referenced project request template
+ Name string `json:"name"`
+}
+
+// ProjectSpec holds the project creation configuration.
+type ProjectSpec struct {
+ // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest API endpoint.
+ // +optional
+ ProjectRequestMessage string `json:"projectRequestMessage"`
+
+ // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
+ // This must point to a template in the 'openshift-config' namespace. It is optional.
+ // If it is not specified, a default template is used.
+ //
+ // +optional
+ ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"`
+}
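+
+// An illustrative manifest (editorial, not part of the upstream API): a
+// Project config setting both fields above. The template name
+// "project-request" is hypothetical; the referenced template must live in the
+// openshift-config namespace:
+//
+//   apiVersion: config.openshift.io/v1
+//   kind: Project
+//   metadata:
+//     name: cluster
+//   spec:
+//     projectRequestMessage: "Contact the platform team to request a project"
+//     projectRequestTemplate:
+//       name: project-request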
+
+type ProjectStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ProjectList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Project `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
new file mode 100644
index 000000000..1413a48ca
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -0,0 +1,89 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`
+type Proxy struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Spec holds user-settable values for the proxy configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProxySpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ProxyStatus `json:"status"`
+}
+
+// ProxySpec contains cluster proxy creation configuration.
+type ProxySpec struct {
+ // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
+ // +optional
+ HTTPProxy string `json:"httpProxy,omitempty"`
+
+ // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.
+ // +optional
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+ // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
+ // Empty means unset and will not result in an env var.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+
+ // readinessEndpoints is a list of endpoints used to verify readiness of the proxy.
+ // +optional
+ ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"`
+
+ // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used
+ // for client egress HTTPS connections. The certificate bundle must come from the
+ // CA that signed the proxy's certificate and must be valid for all connections
+ // made through the proxy. The trustedCA
+ // field should only be consumed by a proxy validator. The validator is responsible
+ // for reading the certificate bundle from required key "ca-bundle.crt" and copying
+ // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed"
+ // namespace. The namespace for the ConfigMap referenced by trustedCA is
+ // "openshift-config". Here is an example ConfigMap (in yaml):
+ //
+ // apiVersion: v1
+ // kind: ConfigMap
+ // metadata:
+ // name: user-ca-bundle
+ // namespace: openshift-config
+ // data:
+ // ca-bundle.crt: |
+ // -----BEGIN CERTIFICATE-----
+ // Custom CA certificate bundle.
+ // -----END CERTIFICATE-----
+ //
+ // +optional
+ TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"`
+}
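+
+// An illustrative manifest (editorial, not part of the upstream API): a Proxy
+// spec wiring the fields above together. The proxy URLs and noProxy entries
+// are assumptions made for the example; "user-ca-bundle" refers to a ConfigMap
+// like the one shown in the trustedCA documentation, which must exist in the
+// openshift-config namespace:
+//
+//   apiVersion: config.openshift.io/v1
+//   kind: Proxy
+//   metadata:
+//     name: cluster
+//   spec:
+//     httpProxy: http://proxy.example.com:3128
+//     httpsProxy: http://proxy.example.com:3128
+//     noProxy: .cluster.local,10.0.0.0/16
+//     trustedCA:
+//       name: user-ca-bundle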
+
+// ProxyStatus shows current known state of the cluster proxy.
+type ProxyStatus struct {
+ // httpProxy is the URL of the proxy for HTTP requests.
+ // +optional
+ HTTPProxy string `json:"httpProxy,omitempty"`
+
+ // httpsProxy is the URL of the proxy for HTTPS requests.
+ // +optional
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+ // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ProxyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Proxy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
new file mode 100644
index 000000000..9b8fa3a52
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -0,0 +1,75 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+// and influence its placement decisions. The canonical name for this config is `cluster`.
+type Scheduler struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec SchedulerSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status SchedulerStatus `json:"status"`
+}
+
+type SchedulerSpec struct {
+ // policy is a reference to a ConfigMap containing a scheduler policy with
+ // user-specified predicates and priorities. If this ConfigMap is not available,
+ // the scheduler defaults to using DefaultAlgorithmProvider.
+ // The namespace for this configmap is openshift-config.
+ // +optional
+ Policy ConfigMapNameReference `json:"policy"`
+ // defaultNodeSelector helps set the cluster-wide default node selector to
+ // restrict pod placement to specific nodes. This is applied to the pods
+ // created in all namespaces without a specified nodeSelector value.
+ // For example,
+ // defaultNodeSelector: "type=user-node,region=east" would set the nodeSelector
+ // field in the pod spec to "type=user-node,region=east" for all pods created
+ // in all namespaces. Namespaces with project-wide node selectors won't be
+ // impacted even if this field is set. This adds an annotation section to
+ // the namespace.
+ // For example, if a new namespace is created with
+ // node-selector='type=user-node,region=east',
+ // the annotation openshift.io/node-selector: type=user-node,region=east
+ // gets added to the project. When the openshift.io/node-selector annotation
+ // is set on the project, its value is used in preference to the value of the
+ // defaultNodeSelector field.
+ // For instance,
+ // openshift.io/node-selector: "type=user-node,region=west" means
+ // that the default of "type=user-node,region=east" set in defaultNodeSelector
+ // would not be applied.
+ // +optional
+ DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+ // mastersSchedulable allows master nodes to be schedulable. When this flag is
+ // turned on, all the master nodes in the cluster will be made schedulable,
+ // so that workload pods can run on them. The default value for this field is false,
+ // meaning none of the master nodes are schedulable.
+ // Important Note: Once the workload pods start running on the master nodes,
+ // extreme care must be taken to ensure that cluster-critical control plane components
+ // are not impacted.
+ // Please turn on this field after doing due diligence.
+ // +optional
+ MastersSchedulable bool `json:"mastersSchedulable"`
+}
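+
+// An illustrative manifest (editorial, not part of the upstream API): a
+// Scheduler config exercising the fields above. The selector value mirrors
+// the example given in the defaultNodeSelector documentation:
+//
+//   apiVersion: config.openshift.io/v1
+//   kind: Scheduler
+//   metadata:
+//     name: cluster
+//   spec:
+//     defaultNodeSelector: type=user-node,region=east
+//     mastersSchedulable: false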
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type SchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ metav1.ListMeta `json:"metadata"`
+ Items []Scheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..3d44627f9
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -0,0 +1,3200 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServer) DeepCopyInto(out *APIServer) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
+func (in *APIServer) DeepCopy() *APIServer {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServer) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
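+
+// An illustrative usage sketch (editorial, not generated code): the trio above
+// is the pattern every type in this file follows. DeepCopy returns an
+// independent copy that is safe to mutate, and DeepCopyObject adapts the copy
+// to runtime.Object for generic client machinery. Assuming an APIServer named
+// "cluster":
+//
+//   original := &APIServer{}
+//   original.Name = "cluster"
+//   clone := original.DeepCopy()
+//   clone.Spec.AdditionalCORSAllowedOrigins = []string{"//example.com"}
+//   // original.Spec is unchanged; clone owns its own slice.
+//   var obj runtime.Object = original.DeepCopyObject()
+//   _ = obj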
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerList) DeepCopyInto(out *APIServerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList.
+func (in *APIServerList) DeepCopy() *APIServerList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) {
+ *out = *in
+ if in.Names != nil {
+ in, out := &in.Names, &out.Names
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.ServingCertificate = in.ServingCertificate
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert.
+func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerNamedServingCert)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) {
+ *out = *in
+ if in.NamedCertificates != nil {
+ in, out := &in.NamedCertificates, &out.NamedCertificates
+ *out = make([]APIServerNamedServingCert, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts.
+func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerServingCerts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
+ *out = *in
+ in.ServingCerts.DeepCopyInto(&out.ServingCerts)
+ out.ClientCA = in.ClientCA
+ if in.AdditionalCORSAllowedOrigins != nil {
+ in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec.
+func (in *APIServerSpec) DeepCopy() *APIServerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus.
+func (in *APIServerStatus) DeepCopy() *APIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
+ *out = *in
+ if in.PluginConfig != nil {
+ in, out := &in.PluginConfig, &out.PluginConfig
+ *out = make(map[string]AdmissionPluginConfig, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.EnabledAdmissionPlugins != nil {
+ in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DisabledAdmissionPlugins != nil {
+ in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig.
+func (in *AdmissionConfig) DeepCopy() *AdmissionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) {
+ *out = *in
+ in.Configuration.DeepCopyInto(&out.Configuration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig.
+func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPluginConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Authentication) DeepCopyInto(out *Authentication) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
+func (in *Authentication) DeepCopy() *Authentication {
+ if in == nil {
+ return nil
+ }
+ out := new(Authentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Authentication) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Authentication, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList.
+func (in *AuthenticationList) DeepCopy() *AuthenticationList {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
+ *out = *in
+ out.OAuthMetadata = in.OAuthMetadata
+ if in.WebhookTokenAuthenticators != nil {
+ in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
+ *out = make([]WebhookTokenAuthenticator, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
+func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) {
+ *out = *in
+ out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus.
+func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus.
+func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AzurePlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus.
+func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BareMetalPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
+ *out = *in
+ out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider.
+func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicAuthIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+ if in == nil {
+ return nil
+ }
+ out := new(Build)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) {
+ *out = *in
+ if in.DefaultProxy != nil {
+ in, out := &in.DefaultProxy, &out.DefaultProxy
+ *out = new(ProxySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitProxy != nil {
+ in, out := &in.GitProxy, &out.GitProxy
+ *out = new(ProxySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults.
+func (in *BuildDefaults) DeepCopy() *BuildDefaults {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildDefaults)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildList) DeepCopyInto(out *BuildList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Build, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
+func (in *BuildList) DeepCopy() *BuildList {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) {
+ *out = *in
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides.
+func (in *BuildOverrides) DeepCopy() *BuildOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
+ *out = *in
+ out.AdditionalTrustedCA = in.AdditionalTrustedCA
+ in.BuildDefaults.DeepCopyInto(&out.BuildDefaults)
+ in.BuildOverrides.DeepCopyInto(&out.BuildOverrides)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
+func (in *BuildSpec) DeepCopy() *BuildSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertInfo) DeepCopyInto(out *CertInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
+func (in *CertInfo) DeepCopy() *CertInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CertInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
+func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator.
+func (in *ClusterOperator) DeepCopy() *ClusterOperator {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperator) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterOperator, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList.
+func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperatorList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec.
+func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ClusterOperatorStatusCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]OperandVersion, len(*in))
+ copy(*out, *in)
+ }
+ if in.RelatedObjects != nil {
+ in, out := &in.RelatedObjects, &out.RelatedObjects
+ *out = make([]ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ in.Extension.DeepCopyInto(&out.Extension)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus.
+func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition.
+func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorStatusCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
+func (in *ClusterVersion) DeepCopy() *ClusterVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersion) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList.
+func (in *ClusterVersionList) DeepCopy() *ClusterVersionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) {
+ *out = *in
+ if in.DesiredUpdate != nil {
+ in, out := &in.DesiredUpdate, &out.DesiredUpdate
+ *out = new(Update)
+ **out = **in
+ }
+ if in.Overrides != nil {
+ in, out := &in.Overrides, &out.Overrides
+ *out = make([]ComponentOverride, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec.
+func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
+ *out = *in
+ out.Desired = in.Desired
+ if in.History != nil {
+ in, out := &in.History, &out.History
+ *out = make([]UpdateHistory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ClusterOperatorStatusCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AvailableUpdates != nil {
+ in, out := &in.AvailableUpdates, &out.AvailableUpdates
+ *out = make([]Update, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
+func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride.
+func (in *ComponentOverride) DeepCopy() *ComponentOverride {
+ if in == nil {
+ return nil
+ }
+ out := new(ComponentOverride)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference.
+func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapFileReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference.
+func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapNameReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Console) DeepCopyInto(out *Console) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
+func (in *Console) DeepCopy() *Console {
+ if in == nil {
+ return nil
+ }
+ out := new(Console)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Console) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication.
+func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Console, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
+func (in *ConsoleList) DeepCopy() *ConsoleList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConsoleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
+ *out = *in
+ out.Authentication = in.Authentication
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
+func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
+func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates.
+func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomFeatureGates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNS) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSList) DeepCopyInto(out *DNSList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNS, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
+func (in *DNSList) DeepCopy() *DNSList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+ *out = *in
+ if in.PublicZone != nil {
+ in, out := &in.PublicZone, &out.PublicZone
+ *out = new(DNSZone)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PrivateZone != nil {
+ in, out := &in.PrivateZone, &out.PrivateZone
+ *out = new(DNSZone)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
+func (in *DNSStatus) DeepCopy() *DNSStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSZone) DeepCopyInto(out *DNSZone) {
+ *out = *in
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone.
+func (in *DNSZone) DeepCopy() *DNSZone {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
+func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
+func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthorization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
+ *out = *in
+ if in.URLs != nil {
+ in, out := &in.URLs, &out.URLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
+func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
+ *out = *in
+ in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
+func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdStorageConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
+ *out = *in
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(ExternalIPPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AutoAssignCIDRs != nil {
+ in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
+func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalIPConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
+ *out = *in
+ if in.AllowedCIDRs != nil {
+ in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.RejectedCIDRs != nil {
+ in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
+func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalIPPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate.
+func (in *FeatureGate) DeepCopy() *FeatureGate {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
+func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateEnabledDisabled)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]FeatureGate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList.
+func (in *FeatureGateList) DeepCopy() *FeatureGateList {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) {
+ *out = *in
+ if in.CustomNoUpgrade != nil {
+ in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade
+ *out = new(CustomFeatureGates)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection.
+func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateSelection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) {
+ *out = *in
+ in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec.
+func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus.
+func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
+func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ if in.CORSAllowedOrigins != nil {
+ in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.AuditConfig.DeepCopyInto(&out.AuditConfig)
+ in.StorageConfig.DeepCopyInto(&out.StorageConfig)
+ in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
+ out.KubeClientConfig = in.KubeClientConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig.
+func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ out.LeaderElection = in.LeaderElection
+ out.Authentication = in.Authentication
+ out.Authorization = in.Authorization
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig.
+func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ if in.Organizations != nil {
+ in, out := &in.Organizations, &out.Organizations
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Teams != nil {
+ in, out := &in.Teams, &out.Teams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CA = in.CA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
+func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitHubIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ out.CA = in.CA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitLabIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GoogleIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) {
+ *out = *in
+ out.FileData = in.FileData
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider.
+func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(HTPasswdIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
+func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSource) DeepCopyInto(out *HubSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource.
+func (in *HubSource) DeepCopy() *HubSource {
+ if in == nil {
+ return nil
+ }
+ out := new(HubSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) {
+ *out = *in
+ out.HubSource = in.HubSource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus.
+func (in *HubSourceStatus) DeepCopy() *HubSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HubSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+ *out = *in
+ in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
+ *out = *in
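+	// Note on the two copy idioms below: pointer fields whose element structs
+	// hold only value types are duplicated with a plain dereference
+	// (**out = **in); fields whose element structs contain slices or maps
+	// delegate to their own DeepCopyInto so nested data is copied as well.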
+ if in.BasicAuth != nil {
+ in, out := &in.BasicAuth, &out.BasicAuth
+ *out = new(BasicAuthIdentityProvider)
+ **out = **in
+ }
+ if in.GitHub != nil {
+ in, out := &in.GitHub, &out.GitHub
+ *out = new(GitHubIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitLab != nil {
+ in, out := &in.GitLab, &out.GitLab
+ *out = new(GitLabIdentityProvider)
+ **out = **in
+ }
+ if in.Google != nil {
+ in, out := &in.Google, &out.Google
+ *out = new(GoogleIdentityProvider)
+ **out = **in
+ }
+ if in.HTPasswd != nil {
+ in, out := &in.HTPasswd, &out.HTPasswd
+ *out = new(HTPasswdIdentityProvider)
+ **out = **in
+ }
+ if in.Keystone != nil {
+ in, out := &in.Keystone, &out.Keystone
+ *out = new(KeystoneIdentityProvider)
+ **out = **in
+ }
+ if in.LDAP != nil {
+ in, out := &in.LDAP, &out.LDAP
+ *out = new(LDAPIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OpenID != nil {
+ in, out := &in.OpenID, &out.OpenID
+ *out = new(OpenIDIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RequestHeader != nil {
+ in, out := &in.RequestHeader, &out.RequestHeader
+ *out = new(RequestHeaderIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
+func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+ if in == nil {
+ return nil
+ }
+ out := new(Image)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Image) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
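+
+// A minimal usage sketch (illustrative, not generated): DeepCopyObject lets
+// these API types satisfy runtime.Object, so generic machinery can clone a
+// value behind the interface without knowing its concrete type:
+//
+//	var obj runtime.Object = &Image{}
+//	copied := obj.DeepCopyObject()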
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
+func (in *ImageLabel) DeepCopy() *ImageLabel {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLabel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageList) DeepCopyInto(out *ImageList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Image, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
+func (in *ImageList) DeepCopy() *ImageList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+ *out = *in
+ if in.AllowedRegistriesForImport != nil {
+ in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
+ *out = make([]RegistryLocation, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.AdditionalTrustedCA = in.AdditionalTrustedCA
+ in.RegistrySources.DeepCopyInto(&out.RegistrySources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
+ *out = *in
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
+func (in *ImageStatus) DeepCopy() *ImageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
+func (in *Infrastructure) DeepCopy() *Infrastructure {
+ if in == nil {
+ return nil
+ }
+ out := new(Infrastructure)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Infrastructure) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Infrastructure, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList.
+func (in *InfrastructureList) DeepCopy() *InfrastructureList {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InfrastructureList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
+ *out = *in
+ out.CloudConfig = in.CloudConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
+func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
+ *out = *in
+ if in.PlatformStatus != nil {
+ in, out := &in.PlatformStatus, &out.PlatformStatus
+ *out = new(PlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus.
+func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Ingress) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressList) DeepCopyInto(out *IngressList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Ingress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
+func (in *IngressList) DeepCopy() *IngressList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
+func (in *IngressSpec) DeepCopy() *IngressSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
+func (in *IngressStatus) DeepCopy() *IngressStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
+ *out = *in
+ out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider.
+func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(KeystoneIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) {
+ *out = *in
+ out.ConnectionOverrides = in.ConnectionOverrides
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig.
+func (in *KubeClientConfig) DeepCopy() *KubeClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
+func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPAttributeMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) {
+ *out = *in
+ out.BindPassword = in.BindPassword
+ out.CA = in.CA
+ in.Attributes.DeepCopyInto(&out.Attributes)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider.
+func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaderElection) DeepCopyInto(out *LeaderElection) {
+ *out = *in
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection.
+func (in *LeaderElection) DeepCopy() *LeaderElection {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaderElection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
+ *out = *in
+ if in.Names != nil {
+ in, out := &in.Names, &out.Names
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
+func (in *NamedCertificate) DeepCopy() *NamedCertificate {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedCertificate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+ if in == nil {
+ return nil
+ }
+ out := new(Network)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Network) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkList) DeepCopyInto(out *NetworkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Network, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
+func (in *NetworkList) DeepCopy() *NetworkList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalIP != nil {
+ in, out := &in.ExternalIP, &out.ExternalIP
+ *out = new(ExternalIPConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
+ *out = *in
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
+func (in *NetworkStatus) DeepCopy() *NetworkStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuth) DeepCopyInto(out *OAuth) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth.
+func (in *OAuth) DeepCopy() *OAuth {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuth)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuth) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthList) DeepCopyInto(out *OAuthList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuth, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList.
+func (in *OAuthList) DeepCopy() *OAuthList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) {
+ *out = *in
+ out.CA = in.CA
+ out.TLSClientCert = in.TLSClientCert
+ out.TLSClientKey = in.TLSClientKey
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo.
+func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthRemoteConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) {
+ *out = *in
+ if in.IdentityProviders != nil {
+ in, out := &in.IdentityProviders, &out.IdentityProviders
+ *out = make([]IdentityProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.TokenConfig = in.TokenConfig
+ out.Templates = in.Templates
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec.
+func (in *OAuthSpec) DeepCopy() *OAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus.
+func (in *OAuthStatus) DeepCopy() *OAuthStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
+ *out = *in
+ out.Login = in.Login
+ out.ProviderSelection = in.ProviderSelection
+ out.Error = in.Error
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
+func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthTemplates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
+ *out = *in
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
+func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDClaims)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ out.CA = in.CA
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraAuthorizeParameters != nil {
+ in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Claims.DeepCopyInto(&out.Claims)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus.
+func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperandVersion) DeepCopyInto(out *OperandVersion) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion.
+func (in *OperandVersion) DeepCopy() *OperandVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(OperandVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHub) DeepCopyInto(out *OperatorHub) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub.
+func (in *OperatorHub) DeepCopy() *OperatorHub {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHub)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorHub) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatorHub, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList.
+func (in *OperatorHubList) DeepCopy() *OperatorHubList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorHubList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) {
+ *out = *in
+ if in.Sources != nil {
+ in, out := &in.Sources, &out.Sources
+ *out = make([]HubSource, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec.
+func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) {
+ *out = *in
+ if in.Sources != nil {
+ in, out := &in.Sources, &out.Sources
+ *out = make([]HubSourceStatus, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus.
+func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSPlatformStatus)
+ **out = **in
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzurePlatformStatus)
+ **out = **in
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new(GCPPlatformStatus)
+ **out = **in
+ }
+ if in.BareMetal != nil {
+ in, out := &in.BareMetal, &out.BareMetal
+ *out = new(BareMetalPlatformStatus)
+ **out = **in
+ }
+ if in.OpenStack != nil {
+ in, out := &in.OpenStack, &out.OpenStack
+ *out = new(OpenStackPlatformStatus)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
+func (in *PlatformStatus) DeepCopy() *PlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ out.ProjectRequestTemplate = in.ProjectRequestTemplate
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Proxy) DeepCopyInto(out *Proxy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy.
+func (in *Proxy) DeepCopy() *Proxy {
+ if in == nil {
+ return nil
+ }
+ out := new(Proxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Proxy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyList) DeepCopyInto(out *ProxyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Proxy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList.
+func (in *ProxyList) DeepCopy() *ProxyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProxyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
+ *out = *in
+ if in.ReadinessEndpoints != nil {
+ in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.TrustedCA = in.TrustedCA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
+func (in *ProxySpec) DeepCopy() *ProxySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
+func (in *ProxyStatus) DeepCopy() *ProxyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
+func (in *RegistryLocation) DeepCopy() *RegistryLocation {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryLocation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistrySources) DeepCopyInto(out *RegistrySources) {
+ *out = *in
+ if in.InsecureRegistries != nil {
+ in, out := &in.InsecureRegistries, &out.InsecureRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.BlockedRegistries != nil {
+ in, out := &in.BlockedRegistries, &out.BlockedRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AllowedRegistries != nil {
+ in, out := &in.AllowedRegistries, &out.AllowedRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources.
+func (in *RegistrySources) DeepCopy() *RegistrySources {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistrySources)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
+func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
+ *out = *in
+ out.ClientCA = in.ClientCA
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsernameHeaders != nil {
+ in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NameHeaders != nil {
+ in, out := &in.NameHeaders, &out.NameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EmailHeaders != nil {
+ in, out := &in.EmailHeaders, &out.EmailHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
+func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scheduler) DeepCopyInto(out *Scheduler) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler.
+func (in *Scheduler) DeepCopy() *Scheduler {
+ if in == nil {
+ return nil
+ }
+ out := new(Scheduler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scheduler) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Scheduler, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList.
+func (in *SchedulerList) DeepCopy() *SchedulerList {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SchedulerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
+ *out = *in
+ out.Policy = in.Policy
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
+func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
+func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference.
+func (in *SecretNameReference) DeepCopy() *SecretNameReference {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretNameReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ if in.NamedCertificates != nil {
+ in, out := &in.NamedCertificates, &out.NamedCertificates
+ *out = make([]NamedCertificate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CipherSuites != nil {
+ in, out := &in.CipherSuites, &out.CipherSuites
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
+func (in *ServingInfo) DeepCopy() *ServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSource) DeepCopyInto(out *StringSource) {
+ *out = *in
+ out.StringSourceSpec = in.StringSourceSpec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
+func (in *StringSource) DeepCopy() *StringSource {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
+func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference.
+func (in *TemplateReference) DeepCopy() *TemplateReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
+func (in *TokenConfig) DeepCopy() *TokenConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Update) DeepCopyInto(out *Update) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
+func (in *Update) DeepCopy() *Update {
+ if in == nil {
+ return nil
+ }
+ out := new(Update)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
+ *out = *in
+ in.StartedTime.DeepCopyInto(&out.StartedTime)
+ if in.CompletionTime != nil {
+ in, out := &in.CompletionTime, &out.CompletionTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
+func (in *UpdateHistory) DeepCopy() *UpdateHistory {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateHistory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+ *out = *in
+ out.KubeConfig = in.KubeConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+ if in == nil {
+ return nil
+ }
+ out := new(WebhookTokenAuthenticator)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..6bd877a4f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,1312 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
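+// A minimal consumption sketch (illustrative; the go-restful wiring is
+// assumed, not shown here): every model below exposes its field-level
+// documentation through a SwaggerDoc() method, e.g.
+//
+//	docs := AdmissionConfig{}.SwaggerDoc()
+//	enabledPluginsDoc := docs["enabledPlugins"]
+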
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AdmissionConfig = map[string]string{
+ "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
+ "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
+}
+
+func (AdmissionConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionConfig
+}
+
+var map_AdmissionPluginConfig = map[string]string{
+ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
+ "location": "Location is the path to a configuration file that contains the plugin's configuration",
+ "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+}
+
+func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionPluginConfig
+}
+
+var map_AuditConfig = map[string]string{
+ "": "AuditConfig holds configuration for the audit capabilities",
+ "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.",
+ "auditFilePath": "All requests coming to the apiserver will be logged to this file.",
+ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
+ "maximumRetainedFiles": "Maximum number of old log files to retain.",
+ "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
+ "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
+ "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+ "logFormat": "Format of saved audits (legacy or json).",
+ "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
+ "webHookMode": "Strategy for sending audit events (block or batch).",
+}
+
+func (AuditConfig) SwaggerDoc() map[string]string {
+ return map_AuditConfig
+}
+
+var map_CertInfo = map[string]string{
+ "": "CertInfo relates a certificate with a private key",
+ "certFile": "CertFile is a file containing a PEM-encoded certificate",
+ "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+}
+
+func (CertInfo) SwaggerDoc() map[string]string {
+ return map_CertInfo
+}
+
+var map_ClientConnectionOverrides = map[string]string{
+ "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
+ "contentType": "contentType is the content type used when sending data to the server from this client.",
+ "qps": "qps controls the number of queries per second allowed for this connection.",
+ "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.",
+}
+
+func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
+ return map_ClientConnectionOverrides
+}
+
+var map_ConfigMapFileReference = map[string]string{
+ "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+ "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
+}
+
+func (ConfigMapFileReference) SwaggerDoc() map[string]string {
+ return map_ConfigMapFileReference
+}
+
+var map_ConfigMapNameReference = map[string]string{
+ "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced config map",
+}
+
+func (ConfigMapNameReference) SwaggerDoc() map[string]string {
+ return map_ConfigMapNameReference
+}
+
+var map_DelegatedAuthentication = map[string]string{
+ "": "DelegatedAuthentication allows authentication to be disabled.",
+ "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
+}
+
+func (DelegatedAuthentication) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthentication
+}
+
+var map_DelegatedAuthorization = map[string]string{
+ "": "DelegatedAuthorization allows authorization to be disabled.",
+ "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
+}
+
+func (DelegatedAuthorization) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthorization
+}
+
+var map_EtcdConnectionInfo = map[string]string{
+ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
+ "urls": "URLs are the URLs for etcd",
+ "ca": "CA is a file containing trusted roots for the etcd server certificates",
+}
+
+func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
+ return map_EtcdConnectionInfo
+}
+
+var map_EtcdStorageConfig = map[string]string{
+ "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
+}
+
+func (EtcdStorageConfig) SwaggerDoc() map[string]string {
+ return map_EtcdStorageConfig
+}
+
+var map_GenericAPIServerConfig = map[string]string{
+ "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd",
+ "servingInfo": "servingInfo describes how to start serving",
+ "corsAllowedOrigins": "corsAllowedOrigins",
+ "auditConfig": "auditConfig describes how to configure audit information",
+ "storageConfig": "storageConfig contains information about how to use",
+ "admission": "admissionConfig holds information about how to configure admission.",
+}
+
+func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
+ return map_GenericAPIServerConfig
+}
+
+var map_GenericControllerConfig = map[string]string{
+ "": "GenericControllerConfig provides information to configure a controller",
+ "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+ "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
+ "authentication": "authentication allows configuration of authentication for the endpoints",
+ "authorization": "authorization allows configuration of authentication for the endpoints",
+}
+
+func (GenericControllerConfig) SwaggerDoc() map[string]string {
+ return map_GenericControllerConfig
+}
+
+var map_HTTPServingInfo = map[string]string{
+ "": "HTTPServingInfo holds configuration for serving HTTP",
+ "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
+ "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
+}
+
+func (HTTPServingInfo) SwaggerDoc() map[string]string {
+ return map_HTTPServingInfo
+}
+
+var map_KubeClientConfig = map[string]string{
+ "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config",
+ "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.",
+}
+
+func (KubeClientConfig) SwaggerDoc() map[string]string {
+ return map_KubeClientConfig
+}
+
+var map_LeaderElection = map[string]string{
+ "": "LeaderElection provides information to elect a leader",
+ "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.",
+ "namespace": "namespace indicates which namespace the resource is in",
+ "name": "name indicates what name to use for the resource",
+ "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+ "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.",
+ "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.",
+}
+
+func (LeaderElection) SwaggerDoc() map[string]string {
+ return map_LeaderElection
+}
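+
+// A minimal sketch (an assumption drawn from the field docs above, not
+// upstream code): renewDeadline must be less than or equal to leaseDuration.
+// The field names below assume the LeaderElection type uses metav1.Duration
+// fields named LeaseDuration and RenewDeadline.
+//
+//	ok := le.RenewDeadline.Duration <= le.LeaseDuration.Duration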
+
+var map_NamedCertificate = map[string]string{
+ "": "NamedCertificate specifies a certificate/key, and the names it should be served for",
+ "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
+}
+
+func (NamedCertificate) SwaggerDoc() map[string]string {
+ return map_NamedCertificate
+}
+
+var map_RemoteConnectionInfo = map[string]string{
+ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
+ "url": "URL is the remote URL to connect to",
+ "ca": "CA is the CA for verifying TLS connections",
+}
+
+func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
+ return map_RemoteConnectionInfo
+}
+
+var map_SecretNameReference = map[string]string{
+ "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced secret",
+}
+
+func (SecretNameReference) SwaggerDoc() map[string]string {
+ return map_SecretNameReference
+}
+
+var map_ServingInfo = map[string]string{
+ "": "ServingInfo holds information about serving web pages",
+ "bindAddress": "BindAddress is the ip:port to serve on",
+ "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+ "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
+ "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
+ "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
+ "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
+}
+
+func (ServingInfo) SwaggerDoc() map[string]string {
+ return map_ServingInfo
+}
+
+var map_StringSource = map[string]string{
+ "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
+}
+
+func (StringSource) SwaggerDoc() map[string]string {
+ return map_StringSource
+}
+
+var map_StringSourceSpec = map[string]string{
+ "": "StringSourceSpec specifies a string value, or external location",
+ "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
+ "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
+ "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
+ "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
+}
+
+func (StringSourceSpec) SwaggerDoc() map[string]string {
+ return map_StringSourceSpec
+}
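+
+// Illustrative sketch (hypothetical field values, not upstream code) of the
+// two JSON forms a StringSource may take per the docs above: a bare string,
+// or a StringSourceSpec object when an env var, file, or key file is used.
+//
+//	"bindPassword": "plain-secret"
+//	"bindPassword": {"file": "/etc/secrets/bind-password", "keyFile": "/etc/secrets/decrypt.key"}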
+
+var map_APIServer = map[string]string{
+ "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.",
+}
+
+func (APIServer) SwaggerDoc() map[string]string {
+ return map_APIServer
+}
+
+var map_APIServerNamedServingCert = map[string]string{
+ "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
+ "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.",
+ "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.",
+}
+
+func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
+ return map_APIServerNamedServingCert
+}
+
+var map_APIServerServingCerts = map[string]string{
+ "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
+}
+
+func (APIServerServingCerts) SwaggerDoc() map[string]string {
+ return map_APIServerServingCerts
+}
+
+var map_APIServerSpec = map[string]string{
+ "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
+ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+ "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+}
+
+func (APIServerSpec) SwaggerDoc() map[string]string {
+ return map_APIServerSpec
+}
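+
+// Illustrative example (hypothetical origin, not upstream code) of an
+// additionalCORSAllowedOrigins entry; per the docs above, each entry is a Go
+// regular expression matched against request origins:
+//
+//	additionalCORSAllowedOrigins:
+//	- (?i)//console\.example\.com(:|\z)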
+
+var map_Authentication = map[string]string{
+ "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+ return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+ return map_AuthenticationList
+}
+
+var map_AuthenticationSpec = map[string]string{
+ "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
+ "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
+ "webhookTokenAuthenticators": "webhookTokenAuthenticators configures remote token reviewers. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. The namespace for these secrets is openshift-config.",
+}
+
+func (AuthenticationSpec) SwaggerDoc() map[string]string {
+ return map_AuthenticationSpec
+}
+
+var map_AuthenticationStatus = map[string]string{
+ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+ return map_AuthenticationStatus
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
+ "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+ return map_WebhookTokenAuthenticator
+}
+
+var map_Build = map[string]string{
+ "": "Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`",
+ "spec": "Spec holds user-settable values for the build controller configuration",
+}
+
+func (Build) SwaggerDoc() map[string]string {
+ return map_Build
+}
+
+var map_BuildDefaults = map[string]string{
+ "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
+ "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
+ "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+ "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
+ "resources": "Resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaults) SwaggerDoc() map[string]string {
+ return map_BuildDefaults
+}
+
+var map_BuildList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (BuildList) SwaggerDoc() map[string]string {
+ return map_BuildList
+}
+
+var map_BuildOverrides = map[string]string{
+ "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+ "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
+ "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+}
+
+func (BuildOverrides) SwaggerDoc() map[string]string {
+ return map_BuildOverrides
+}
+
+var map_BuildSpec = map[string]string{
+ "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.",
+ "buildDefaults": "BuildDefaults controls the default information for Builds",
+ "buildOverrides": "BuildOverrides controls override settings for builds",
+}
+
+func (BuildSpec) SwaggerDoc() map[string]string {
+ return map_BuildSpec
+}
+
+var map_ImageLabel = map[string]string{
+ "name": "Name defines the name of the label. It must have non-zero length.",
+ "value": "Value defines the literal value of the label.",
+}
+
+func (ImageLabel) SwaggerDoc() map[string]string {
+ return map_ImageLabel
+}
+
+var map_ClusterOperator = map[string]string{
+ "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.",
+ "spec": "spec hold the intent of how this operator should behave.",
+ "status": "status holds the information about the state of an operator. It is consistent with status information across the kube ecosystem.",
+}
+
+func (ClusterOperator) SwaggerDoc() map[string]string {
+ return map_ClusterOperator
+}
+
+var map_ClusterOperatorList = map[string]string{
+ "": "ClusterOperatorList is a list of OperatorStatus resources.",
+}
+
+func (ClusterOperatorList) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorList
+}
+
+var map_ClusterOperatorSpec = map[string]string{
+ "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
+}
+
+func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorSpec
+}
+
+var map_ClusterOperatorStatus = map[string]string{
+ "": "ClusterOperatorStatus provides information about the status of the operator.",
+ "conditions": "conditions describes the state of the operator's reconciliation functionality.",
+ "versions": "versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple entries in the array. If an operator is Available, it must have at least one entry. You must report the version of the operator itself with the name \"operator\".",
+ "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
+ "extension": "extension contains any additional status information specific to the operator which owns this status object.",
+}
+
+func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorStatus
+}
+
+var map_ClusterOperatorStatusCondition = map[string]string{
+ "": "ClusterOperatorStatusCondition represents the state of the operator's reconciliation functionality.",
+ "type": "type specifies the state of the operator's reconciliation functionality.",
+ "status": "status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status object.",
+ "reason": "reason is the reason for the condition's last transition. Reasons are CamelCase",
+ "message": "message provides additional information about the current condition. This is only to be consumed by humans.",
+}
+
+func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorStatusCondition
+}
+
+var map_ObjectReference = map[string]string{
+ "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+ "group": "group of the referent.",
+ "resource": "resource of the referent.",
+ "namespace": "namespace of the referent.",
+ "name": "name of the referent.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+ return map_ObjectReference
+}
+
+var map_OperandVersion = map[string]string{
+ "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
+ "version": "version indicates which version of a particular operand is currently being manage. It must always match the Available condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
+}
+
+func (OperandVersion) SwaggerDoc() map[string]string {
+ return map_OperandVersion
+}
+
+var map_ClusterVersion = map[string]string{
+ "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.",
+ "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
+ "status": "status contains information about the available updates and any in-progress updates.",
+}
+
+func (ClusterVersion) SwaggerDoc() map[string]string {
+ return map_ClusterVersion
+}
+
+var map_ClusterVersionList = map[string]string{
+ "": "ClusterVersionList is a list of ClusterVersion resources.",
+}
+
+func (ClusterVersionList) SwaggerDoc() map[string]string {
+ return map_ClusterVersionList
+}
+
+var map_ClusterVersionSpec = map[string]string{
+ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
+ "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
+ "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
+ "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
+ "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.",
+ "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
+}
+
+func (ClusterVersionSpec) SwaggerDoc() map[string]string {
+ return map_ClusterVersionSpec
+}
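+
+// Illustrative sketch (hypothetical clusterID, channel, and version values,
+// not upstream code) of a ClusterVersionSpec requesting an update by version:
+//
+//	spec:
+//	  clusterID: 11111111-2222-3333-4444-555555555555
+//	  channel: stable-4.1
+//	  desiredUpdate:
+//	    version: 4.1.2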
+
+var map_ClusterVersionStatus = map[string]string{
+ "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
+ "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
+ "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
+ "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent from a previous version.",
+ "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
+ "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
+ "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
+}
+
+func (ClusterVersionStatus) SwaggerDoc() map[string]string {
+ return map_ClusterVersionStatus
+}
+
+var map_ComponentOverride = map[string]string{
+ "": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
+ "kind": "kind indentifies which object to override.",
+ "group": "group identifies the API group that the kind is in.",
+ "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
+ "name": "name is the component's name.",
+ "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false",
+}
+
+func (ComponentOverride) SwaggerDoc() map[string]string {
+ return map_ComponentOverride
+}
+
+var map_Update = map[string]string{
+ "": "Update represents a release of the ClusterVersionOperator, referenced by the Image member.",
+ "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.",
+ "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
+ "force": "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.",
+}
+
+func (Update) SwaggerDoc() map[string]string {
+ return map_Update
+}
+
+var map_UpdateHistory = map[string]string{
+ "": "UpdateHistory is a single attempted update to the cluster.",
+ "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).",
+ "startedTime": "startedTime is the time at which the update was started.",
+ "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
+ "version": "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
+ "image": "image is a container image location that contains the update. This value is always populated.",
+ "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.",
+}
+
+func (UpdateHistory) SwaggerDoc() map[string]string {
+ return map_UpdateHistory
+}
+
+var map_Console = map[string]string{
+ "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Console) SwaggerDoc() map[string]string {
+ return map_Console
+}
+
+var map_ConsoleAuthentication = map[string]string{
+ "": "ConsoleAuthentication defines a list of optional configuration for console authentication.",
+ "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
+}
+
+func (ConsoleAuthentication) SwaggerDoc() map[string]string {
+ return map_ConsoleAuthentication
+}
+
+var map_ConsoleList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (ConsoleList) SwaggerDoc() map[string]string {
+ return map_ConsoleList
+}
+
+var map_ConsoleSpec = map[string]string{
+ "": "ConsoleSpec is the specification of the desired behavior of the Console.",
+}
+
+func (ConsoleSpec) SwaggerDoc() map[string]string {
+ return map_ConsoleSpec
+}
+
+var map_ConsoleStatus = map[string]string{
+ "": "ConsoleStatus defines the observed status of the Console.",
+ "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.",
+}
+
+func (ConsoleStatus) SwaggerDoc() map[string]string {
+ return map_ConsoleStatus
+}
+
+var map_DNS = map[string]string{
+ "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (DNS) SwaggerDoc() map[string]string {
+ return map_DNS
+}
+
+var map_DNSList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (DNSList) SwaggerDoc() map[string]string {
+ return map_DNSList
+}
+
+var map_DNSSpec = map[string]string{
+ "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
+ "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
+ "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.",
+}
+
+func (DNSSpec) SwaggerDoc() map[string]string {
+ return map_DNSSpec
+}
+
+var map_DNSZone = map[string]string{
+ "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.",
+ "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get",
+ "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options",
+}
+
+func (DNSZone) SwaggerDoc() map[string]string {
+ return map_DNSZone
+}
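+
+// Illustrative sketch (hypothetical zone ID and tag values, not upstream
+// code): a DNSZone may be identified either by provider ID or by tags, per
+// the docs above.
+//
+//	publicZone:
+//	  id: Z21EXAMPLEID            # e.g. an AWS Route53 hosted zone ID
+//	privateZone:
+//	  tags:
+//	    Name: mycluster-private   # matched via tag filters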
+
+var map_CustomFeatureGates = map[string]string{
+ "enabled": "enabled is a list of all feature gates that you want to force on",
+ "disabled": "disabled is a list of all feature gates that you want to force off",
+}
+
+func (CustomFeatureGates) SwaggerDoc() map[string]string {
+ return map_CustomFeatureGates
+}
+
+var map_FeatureGate = map[string]string{
+ "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (FeatureGate) SwaggerDoc() map[string]string {
+ return map_FeatureGate
+}
+
+var map_FeatureGateList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (FeatureGateList) SwaggerDoc() map[string]string {
+ return map_FeatureGateList
+}
+
+var map_FeatureGateSelection = map[string]string{
+ "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
+ "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.",
+}
+
+func (FeatureGateSelection) SwaggerDoc() map[string]string {
+ return map_FeatureGateSelection
+}
+
+var map_Image = map[string]string{
+ "": "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+ return map_Image
+}
+
+var map_ImageList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+ return map_ImageList
+}
+
+var map_ImageSpec = map[string]string{
+ "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+ "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
+ "registrySources": "RegistrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
+}
+
+func (ImageSpec) SwaggerDoc() map[string]string {
+ return map_ImageSpec
+}
+
+var map_ImageStatus = map[string]string{
+ "internalRegistryHostname": "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+}
+
+func (ImageStatus) SwaggerDoc() map[string]string {
+ return map_ImageStatus
+}
+
+var map_RegistryLocation = map[string]string{
+ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
+ "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
+ "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
+}
+
+func (RegistryLocation) SwaggerDoc() map[string]string {
+ return map_RegistryLocation
+}
+
+var map_RegistrySources = map[string]string{
+ "": "RegistrySources holds cluster-wide information about how to handle the registries config.",
+ "insecureRegistries": "InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.",
+ "blockedRegistries": "BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "allowedRegistries": "AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+}
+
+func (RegistrySources) SwaggerDoc() map[string]string {
+ return map_RegistrySources
+}
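+
+// exampleRegistrySourcesValid is an illustrative sketch, not part of the
+// upstream generated file: it checks the constraint documented above that
+// only one of BlockedRegistries or AllowedRegistries may be set. The field
+// names assume the RegistrySources type in this package.
+func exampleRegistrySourcesValid(s RegistrySources) bool {
+	// BlockedRegistries and AllowedRegistries are mutually exclusive.
+	return len(s.BlockedRegistries) == 0 || len(s.AllowedRegistries) == 0
+}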
+
+var map_AWSPlatformStatus = map[string]string{
+ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
+ "region": "region holds the default AWS region for new AWS resources created by the cluster.",
+}
+
+func (AWSPlatformStatus) SwaggerDoc() map[string]string {
+ return map_AWSPlatformStatus
+}
+
+var map_AzurePlatformStatus = map[string]string{
+ "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
+ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+}
+
+func (AzurePlatformStatus) SwaggerDoc() map[string]string {
+ return map_AzurePlatformStatus
+}
+
+var map_BareMetalPlatformStatus = map[string]string{
+ "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+}
+
+func (BareMetalPlatformStatus) SwaggerDoc() map[string]string {
+ return map_BareMetalPlatformStatus
+}
+
+var map_GCPPlatformStatus = map[string]string{
+ "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.",
+ "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.",
+ "region": "region holds the region for new GCP resources created for the cluster.",
+}
+
+func (GCPPlatformStatus) SwaggerDoc() map[string]string {
+ return map_GCPPlatformStatus
+}
+
+var map_Infrastructure = map[string]string{
+ "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Infrastructure) SwaggerDoc() map[string]string {
+ return map_Infrastructure
+}
+
+var map_InfrastructureList = map[string]string{
+ "": "InfrastructureList is",
+ "metadata": "Standard object's metadata.",
+}
+
+func (InfrastructureList) SwaggerDoc() map[string]string {
+ return map_InfrastructureList
+}
+
+var map_InfrastructureSpec = map[string]string{
+ "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.",
+ "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.",
+}
+
+func (InfrastructureSpec) SwaggerDoc() map[string]string {
+ return map_InfrastructureSpec
+}
+
+var map_InfrastructureStatus = map[string]string{
+ "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
+ "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
+ "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
+ "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.",
+ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
+ "apiServerURL": "apiServerURL is a valid URI with scheme(http/https), address and port. apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
+ "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme(http/https), address and port. apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
+}
+
+func (InfrastructureStatus) SwaggerDoc() map[string]string {
+ return map_InfrastructureStatus
+}
+
+var map_OpenStackPlatformStatus = map[string]string{
+ "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+}
+
+func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
+ return map_OpenStackPlatformStatus
+}
+
+var map_PlatformStatus = map[string]string{
+ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
+ "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+ "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
+ "azure": "Azure contains settings specific to the Azure infrastructure provider.",
+ "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
+ "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
+ "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
+}
+
+func (PlatformStatus) SwaggerDoc() map[string]string {
+ return map_PlatformStatus
+}
+
+var map_Ingress = map[string]string{
+ "": "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Ingress) SwaggerDoc() map[string]string {
+ return map_Ingress
+}
+
+var map_IngressList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (IngressList) SwaggerDoc() map[string]string {
+ return map_IngressList
+}
+
+var map_IngressSpec = map[string]string{
+ "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".",
+}
+
+func (IngressSpec) SwaggerDoc() map[string]string {
+ return map_IngressSpec
+}
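+
+// exampleDefaultRouteHost is an illustrative sketch, not part of the upstream
+// generated file: it spells out the "<route-name>.<route-namespace>.<domain>"
+// pattern documented for IngressSpec.domain above.
+func exampleDefaultRouteHost(routeName, routeNamespace, domain string) string {
+	// e.g. exampleDefaultRouteHost("frontend", "shop", "apps.example.com")
+	// yields "frontend.shop.apps.example.com".
+	return routeName + "." + routeNamespace + "." + domain
+}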
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
+ "cidr": "The complete block for pod IPs.",
+ "hostPrefix": "The size (prefix) of block to allocate to each node.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_ExternalIPConfig = map[string]string{
+ "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
+ "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil, any value is allowed for an ExternalIP. If the empty/zero policy is supplied, then ExternalIP is not allowed to be set.",
+ "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
+}
+
+func (ExternalIPConfig) SwaggerDoc() map[string]string {
+ return map_ExternalIPConfig
+}
+
+var map_ExternalIPPolicy = map[string]string{
+ "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
+ "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
+ "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
+}
+
+func (ExternalIPPolicy) SwaggerDoc() map[string]string {
+ return map_ExternalIPPolicy
+}
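+
+// exampleExternalIPAllowed is an illustrative sketch, not part of the
+// upstream generated file: it encodes the precedence documented above, where
+// rejectedCIDRs take precedence over allowedCIDRs. Matching an IP against the
+// CIDR lists (e.g. with net.ParseCIDR) is left out for brevity.
+func exampleExternalIPAllowed(matchesAllowed, matchesRejected bool) bool {
+	// An IP in any rejected CIDR is denied even if an allowed CIDR matches it.
+	return matchesAllowed && !matchesRejected
+}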
+
+var map_Network = map[string]string{
+ "": "Network holds cluster-wide information about Network. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration.",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+ return map_Network
+}
+
+var map_NetworkList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+ return map_NetworkList
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after installation is not supported.",
+ "clusterNetwork": "IP address pool to use for pod IPs.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
+ "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN",
+ "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+ "": "NetworkStatus is the current network configuration.",
+ "clusterNetwork": "IP address pool to use for pod IPs.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
+ "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).",
+ "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+ return map_NetworkStatus
+}
+
+var map_BasicAuthIdentityProvider = map[string]string{
+ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
+}
+
+func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
+ return map_BasicAuthIdentityProvider
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "organizations": "organizations optionally restricts which organizations are allowed to log in",
+ "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
+ "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "url": "url is the oauth server base URL",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+}
+
+func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitLabIdentityProvider
+}
+
+var map_GoogleIdentityProvider = map[string]string{
+ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
+}
+
+func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GoogleIdentityProvider
+}
+
+var map_HTPasswdIdentityProvider = map[string]string{
+ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials",
+ "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
+ return map_HTPasswdIdentityProvider
+}
+
+var map_IdentityProvider = map[string]string{
+ "": "IdentityProvider provides identities for users authenticating using credentials",
+ "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
+ "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"",
+}
+
+func (IdentityProvider) SwaggerDoc() map[string]string {
+ return map_IdentityProvider
+}
+
+var map_IdentityProviderConfig = map[string]string{
+ "": "IdentityProviderConfig contains configuration for using a specific identity provider",
+ "type": "type identifies the identity provider type for this entry.",
+ "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
+ "github": "github enables user authentication using GitHub credentials",
+ "gitlab": "gitlab enables user authentication using GitLab credentials",
+ "google": "google enables user authentication using Google credentials",
+ "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
+ "keystone": "keystone enables user authentication using keystone password credentials",
+ "ldap": "ldap enables user authentication using LDAP credentials",
+ "openID": "openID enables user authentication using OpenID credentials",
+ "requestHeader": "requestHeader enables user authentication using request header credentials",
+}
+
+func (IdentityProviderConfig) SwaggerDoc() map[string]string {
+ return map_IdentityProviderConfig
+}
+
+var map_KeystoneIdentityProvider = map[string]string{
+ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials",
+ "domainName": "domainName is required for keystone v3",
+}
+
+func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
+ return map_KeystoneIdentityProvider
+}
+
+var map_LDAPAttributeMapping = map[string]string{
+ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
+ "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"",
+ "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
+ "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
+ "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+}
+
+func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
+ return map_LDAPAttributeMapping
+}
+
+var map_LDAPIdentityProvider = map[string]string{
+ "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials",
+ "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter",
+ "bindDN": "bindDN is an optional DN to bind with during the search phase.",
+ "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "attributes": "attributes maps LDAP attributes to identities",
+}
+
+func (LDAPIdentityProvider) SwaggerDoc() map[string]string {
+ return map_LDAPIdentityProvider
+}
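+
+// A minimal sketch (editorial, not generated code) of composing the RFC 2255
+// search URL documented for the url field; assumes `import "fmt"`, and all
+// parameter names are illustrative. For example,
+// ldapSearchURL("ldap.example.com:389", "ou=users,dc=example,dc=com", "uid", "sub", "(objectClass=person)")
+// yields "ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid?sub?(objectClass=person)".
+func ldapSearchURL(hostPort, baseDN, attribute, scope, filter string) string {
+	return fmt.Sprintf("ldap://%s/%s?%s?%s?%s", hostPort, baseDN, attribute, scope, filter)
+}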
+
+var map_OAuth = map[string]string{
+ "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.",
+}
+
+func (OAuth) SwaggerDoc() map[string]string {
+ return map_OAuth
+}
+
+var map_OAuthRemoteConnectionInfo = map[string]string{
+ "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
+ "url": "url is the remote URL to connect to",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
+ return map_OAuthRemoteConnectionInfo
+}
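+
+// A minimal sketch (editorial, not generated code) of turning the PEM data
+// resolved from the ca, tlsClientCert, and tlsClientKey references above into
+// a TLS client configuration; assumes `import ("crypto/tls"; "crypto/x509";
+// "errors")`, with the byte slices standing in for data read from the
+// referenced config map and secrets.
+func remoteTLSConfig(caPEM, certPEM, keyPEM []byte) (*tls.Config, error) {
+	pool := x509.NewCertPool()
+	if !pool.AppendCertsFromPEM(caPEM) {
+		return nil, errors.New("invalid ca bundle") // invalid ca data: provider not honored
+	}
+	cert, err := tls.X509KeyPair(certPEM, keyPEM)
+	if err != nil {
+		return nil, err // invalid client cert/key: provider not honored
+	}
+	return &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}}, nil
+}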
+
+var map_OAuthSpec = map[string]string{
+ "": "OAuthSpec contains desired cluster auth configuration",
+ "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
+ "tokenConfig": "tokenConfig contains options for authorization and access tokens",
+ "templates": "templates allow you to customize pages like the login page.",
+}
+
+func (OAuthSpec) SwaggerDoc() map[string]string {
+ return map_OAuthSpec
+}
+
+var map_OAuthStatus = map[string]string{
+ "": "OAuthStatus shows current known state of OAuth server in the cluster",
+}
+
+func (OAuthStatus) SwaggerDoc() map[string]string {
+ return map_OAuthStatus
+}
+
+var map_OAuthTemplates = map[string]string{
+ "": "OAuthTemplates allow for customization of pages like the login page",
+ "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
+ "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
+ "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthTemplates) SwaggerDoc() map[string]string {
+ return map_OAuthTemplates
+}
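+
+// A minimal sketch (editorial, not generated code) of rendering one of the Go
+// templates referenced above, with fallback behavior as documented; assumes
+// `import ("html/template"; "io")`, and loginHTML stands in for the
+// "login.html" key of the referenced secret.
+func renderLoginPage(w io.Writer, loginHTML string, data interface{}) error {
+	tmpl, err := template.New("login.html").Parse(loginHTML)
+	if err != nil {
+		return err // invalid template: callers fall back to the default login page
+	}
+	return tmpl.Execute(w, data)
+}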
+
+var map_OpenIDClaims = map[string]string{
+ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
+ "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim",
+ "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
+ "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+}
+
+func (OpenIDClaims) SwaggerDoc() map[string]string {
+ return map_OpenIDClaims
+}
+
+var map_OpenIDIdentityProvider = map[string]string{
+ "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.",
+ "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.",
+ "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.",
+ "claims": "claims mappings",
+}
+
+func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
+ return map_OpenIDIdentityProvider
+}
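+
+// A minimal sketch (editorial, not generated code) of the issuer constraint
+// documented above: https scheme, no query or fragment component; assumes
+// `import ("fmt"; "net/url")`.
+func validateIssuer(issuer string) error {
+	u, err := url.Parse(issuer)
+	if err != nil {
+		return err
+	}
+	if u.Scheme != "https" {
+		return fmt.Errorf("issuer %q must use the https scheme", issuer)
+	}
+	if u.RawQuery != "" || u.Fragment != "" {
+		return fmt.Errorf("issuer %q must not have a query or fragment component", issuer)
+	}
+	return nil
+}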
+
+var map_RequestHeaderIdentityProvider = map[string]string{
+ "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials",
+ "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.",
+ "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.",
+ "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.",
+ "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
+ "headers": "headers is the set of headers to check for identity information",
+ "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username",
+ "nameHeaders": "nameHeaders is the set of headers to check for the display name",
+ "emailHeaders": "emailHeaders is the set of headers to check for the email address",
+}
+
+func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
+ return map_RequestHeaderIdentityProvider
+}
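+
+// A minimal sketch (editorial, not generated code) of the ${url} and ${query}
+// substitutions documented for loginURL and challengeURL above; assumes
+// `import ("net/url"; "strings")`, with current being the URL of the request
+// being redirected.
+func expandRedirectURL(pattern string, current *url.URL) string {
+	s := strings.Replace(pattern, "${url}", url.QueryEscape(current.String()), -1)
+	return strings.Replace(s, "${query}", current.RawQuery, -1)
+}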
+
+var map_TokenConfig = map[string]string{
+ "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
+ "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
+ "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are integer values:\n x < 0 Tokens time out is enabled but tokens never timeout unless configured per client (e.g. `-1`)\n x = 0 Tokens time out is disabled (default)\n x > 0 Tokens time out if there is no activity for x seconds\nThe current minimum allowed value for X is 300 (5 minutes)",
+}
+
+func (TokenConfig) SwaggerDoc() map[string]string {
+ return map_TokenConfig
+}
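+
+// A minimal sketch (editorial, not generated code) of the sign conventions
+// documented for accessTokenInactivityTimeoutSeconds above; assumes
+// `import "time"`.
+func inactivityTimeout(seconds int32) (timeout time.Duration, enabled bool) {
+	switch {
+	case seconds < 0:
+		return 0, true // enabled, but no timeout unless configured per client
+	case seconds == 0:
+		return 0, false // disabled (the default)
+	default:
+		return time.Duration(seconds) * time.Second, true // minimum allowed is 300s
+	}
+}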
+
+var map_HubSource = map[string]string{
+ "": "HubSource is used to specify the hub source and its configuration",
+ "name": "name is the name of one of the default hub sources",
+ "disabled": "disabled is used to disable a default hub source on cluster",
+}
+
+func (HubSource) SwaggerDoc() map[string]string {
+ return map_HubSource
+}
+
+var map_HubSourceStatus = map[string]string{
+ "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source",
+ "status": "status indicates success or failure in applying the configuration",
+ "message": "message provides more information regarding failures",
+}
+
+func (HubSourceStatus) SwaggerDoc() map[string]string {
+ return map_HubSourceStatus
+}
+
+var map_OperatorHub = map[string]string{
+ "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.",
+}
+
+func (OperatorHub) SwaggerDoc() map[string]string {
+ return map_OperatorHub
+}
+
+var map_OperatorHubList = map[string]string{
+ "": "OperatorHubList contains a list of OperatorHub",
+}
+
+func (OperatorHubList) SwaggerDoc() map[string]string {
+ return map_OperatorHubList
+}
+
+var map_OperatorHubSpec = map[string]string{
+ "": "OperatorHubSpec defines the desired state of OperatorHub",
+ "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it indicates that the default hub sources are enabled on the cluster. The list of default hub sources and their current state will always be reflected in the status block.",
+}
+
+func (OperatorHubSpec) SwaggerDoc() map[string]string {
+ return map_OperatorHubSpec
+}
+
+var map_OperatorHubStatus = map[string]string{
+ "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.",
+ "sources": "sources encapsulates the result of applying the configuration for each hub source",
+}
+
+func (OperatorHubStatus) SwaggerDoc() map[string]string {
+ return map_OperatorHubStatus
+}
+
+var map_Project = map[string]string{
+ "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Project) SwaggerDoc() map[string]string {
+ return map_Project
+}
+
+var map_ProjectList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (ProjectList) SwaggerDoc() map[string]string {
+ return map_ProjectList
+}
+
+var map_ProjectSpec = map[string]string{
+ "": "ProjectSpec holds the project creation configuration.",
+ "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
+ "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.",
+}
+
+func (ProjectSpec) SwaggerDoc() map[string]string {
+ return map_ProjectSpec
+}
+
+var map_TemplateReference = map[string]string{
+ "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced project request template",
+}
+
+func (TemplateReference) SwaggerDoc() map[string]string {
+ return map_TemplateReference
+}
+
+var map_Proxy = map[string]string{
+ "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`",
+ "spec": "Spec holds user-settable values for the proxy configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Proxy) SwaggerDoc() map[string]string {
+ return map_Proxy
+}
+
+var map_ProxyList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (ProxyList) SwaggerDoc() map[string]string {
+ return map_ProxyList
+}
+
+var map_ProxySpec = map[string]string{
+ "": "ProxySpec contains cluster proxy creation configuration.",
+ "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
+ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.",
+ "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.",
+ "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.",
+ "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |",
+}
+
+func (ProxySpec) SwaggerDoc() map[string]string {
+ return map_ProxySpec
+}
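+
+// A minimal sketch (editorial, not generated code) of how the three proxy
+// fields above map to the conventional environment variables, with empty
+// values producing no env var as documented; parameter names mirror ProxySpec.
+func proxyEnv(httpProxy, httpsProxy, noProxy string) map[string]string {
+	env := map[string]string{}
+	if httpProxy != "" {
+		env["HTTP_PROXY"] = httpProxy
+	}
+	if httpsProxy != "" {
+		env["HTTPS_PROXY"] = httpsProxy
+	}
+	if noProxy != "" {
+		env["NO_PROXY"] = noProxy
+	}
+	return env
+}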
+
+var map_ProxyStatus = map[string]string{
+ "": "ProxyStatus shows current known state of the cluster proxy.",
+ "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.",
+ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.",
+ "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.",
+}
+
+func (ProxyStatus) SwaggerDoc() map[string]string {
+ return map_ProxyStatus
+}
+
+var map_Scheduler = map[string]string{
+ "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
+ "metadata": "Standard object's metadata.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Scheduler) SwaggerDoc() map[string]string {
+ return map_Scheduler
+}
+
+var map_SchedulerList = map[string]string{
+ "metadata": "Standard object's metadata.",
+}
+
+func (SchedulerList) SwaggerDoc() map[string]string {
+ return map_SchedulerList
+}
+
+var map_SchedulerSpec = map[string]string{
+ "policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
+ "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
+ "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
+}
+
+func (SchedulerSpec) SwaggerDoc() map[string]string {
+ return map_SchedulerSpec
+}
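+
+// A minimal sketch (editorial, not generated code) of the precedence
+// documented for defaultNodeSelector above: a project-level
+// openshift.io/node-selector annotation always wins over the cluster-wide
+// default; nsAnnotations stands in for the namespace's annotations.
+func effectiveNodeSelector(nsAnnotations map[string]string, defaultNodeSelector string) string {
+	if sel, ok := nsAnnotations["openshift.io/node-selector"]; ok {
+		return sel // the project annotation is preferred, even when empty
+	}
+	return defaultNodeSelector
+}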
+
+// AUTO-GENERATED FUNCTIONS END HERE