-rw-r--r--  .cirrus.yml | 80
-rw-r--r--  .copr/Makefile | 2
-rw-r--r--  Makefile | 16
-rw-r--r--  RELEASE_NOTES.md | 26
-rw-r--r--  cmd/podman/cliconfig/config.go | 6
-rw-r--r--  cmd/podman/commands.go | 3
-rw-r--r--  cmd/podman/service.go | 154
-rw-r--r--  cmd/podman/service_dummy.go | 11
-rw-r--r--  cmd/podman/tag.go | 4
-rw-r--r--  cmd/podman/varlink.go | 2
-rw-r--r--  cmd/service/main.go | 55
-rw-r--r--  commands.md | 1
-rw-r--r--  completions/bash/podman | 13
-rwxr-xr-x  contrib/cirrus/build_swagger.sh | 17
-rwxr-xr-x  contrib/cirrus/upload_release_archive.sh | 44
-rwxr-xr-x  contrib/upldrel/entrypoint.sh | 2
-rw-r--r--  docs/source/Reference.rst | 2
-rw-r--r--  docs/source/_static/api.html | 24
-rw-r--r--  docs/source/markdown/podman-rmi.1.md | 2
-rw-r--r--  docs/source/markdown/podman-service.1.md | 47
-rw-r--r--  docs/source/markdown/podman-tag.1.md | 12
-rw-r--r--  docs/source/markdown/podman.1.md | 1
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 3
-rwxr-xr-x[-rw-r--r--]  hack/install_golangci.sh | 14
-rw-r--r--  install.md | 129
-rw-r--r--  libpod/container_api.go | 69
-rw-r--r--  libpod/oci.go | 29
-rw-r--r--  libpod/oci_conmon_linux.go | 264
-rw-r--r--  libpod/oci_missing.go | 13
-rw-r--r--  libpod/util.go | 20
-rw-r--r--  pkg/adapter/client.go | 2
-rw-r--r--  pkg/adapter/client_config.go | 7
-rw-r--r--  pkg/api/Makefile | 6
-rw-r--r--  pkg/api/handlers/containers_attach.go | 159
-rw-r--r--  pkg/api/handlers/images.go | 17
-rw-r--r--  pkg/api/handlers/libpod/containers.go | 16
-rw-r--r--  pkg/api/handlers/libpod/pods.go | 17
-rw-r--r--  pkg/api/handlers/swagger.go | 4
-rw-r--r--  pkg/api/handlers/utils/errors.go | 5
-rw-r--r--  pkg/api/server/listener_api.go | 31
-rw-r--r--  pkg/api/server/register_containers.go | 200
-rw-r--r--  pkg/api/server/register_images.go | 12
-rw-r--r--  pkg/api/server/register_pods.go | 3
-rw-r--r--  pkg/api/server/server.go | 60
-rw-r--r--  pkg/bindings/containers.go | 8
-rw-r--r--  pkg/rootless/rootless_linux.c | 9
-rw-r--r--  test/e2e/run_signal_test.go | 2
-rw-r--r--  vendor/github.com/containers/conmon/LICENSE | 190
-rw-r--r--  vendor/github.com/containers/conmon/runner/config/config.go | 19
-rw-r--r--  vendor/github.com/containers/conmon/runner/config/config_unix.go | 7
-rw-r--r--  vendor/github.com/containers/conmon/runner/config/config_windows.go | 7
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml | 164
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml | 328
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml | 101
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml | 98
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml | 219
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml | 123
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml | 366
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml | 70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml | 100
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml | 76
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml | 144
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml | 221
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml | 55
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml | 141
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml | 661
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml | 63
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml | 88
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types.go | 2
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_apiserver.go | 43
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_authentication.go | 4
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_build.go | 14
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_operator.go | 72
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_version.go | 32
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_console.go | 9
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_dns.go | 7
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_feature.go | 120
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_image.go | 40
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_infrastructure.go | 39
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_ingress.go | 18
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_network.go | 27
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_operatorhub.go | 19
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_project.go | 7
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_proxy.go | 5
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_scheduling.go | 7
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go | 260
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go | 199
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go | 306
-rw-r--r--  vendor/modules.txt | 4
90 files changed, 5627 insertions, 473 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index abf3d0dd8..e1810fab6 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -514,6 +514,7 @@ special_testing_cgroupv2_task:
always:
<<: *standardlogs
+
special_testing_endpoint_task:
depends_on:
@@ -561,7 +562,8 @@ test_build_cache_images_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*'
+ $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
depends_on:
- "gating"
@@ -593,7 +595,8 @@ verify_test_built_images_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*'
+ $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
depends_on:
@@ -637,47 +640,45 @@ verify_test_built_images_task:
always:
<<: *standardlogs
-
-upload_snap_task:
-
- # Only when PR or branch is merged into master
- only_if: $CIRRUS_BRANCH == $DEST_BRANCH
-
- depends_on:
- - "test_building_snap"
-
- container:
- image: yakshaveinc/snapcraft:core18
-
- env:
- SNAPCRAFT_LOGIN: ENCRYPTED[d8e82eb31c6372fec07f405f413d57806026b1a9f8400033531ebcd54d6750a5e4a8b1f68e3ec65c98c65e0d9b2a6a75]
- snapcraft_login_file:
- path: /root/.snapcraft/login.cfg
- variable_name: SNAPCRAFT_LOGIN
- snapcraft_script:
- - 'apt-get -y update'
- - 'snapcraft login --with "/root/.snapcraft/login.cfg"'
- - 'cd contrib/snapcraft && snapcraft && snapcraft push *.snap --release edge'
-
-
-test_docs_task:
-
+ #upload_snap_task:
+ # only_if: >-
+ # $CIRRUS_BRANCH != $DEST_BRANCH &&
+ # $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ # $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
+ #
+ # # Only when PR or branch is merged into master
+ #
+ # depends_on:
+ # - "test_building_snap"
+ #
+ # container:
+ # image: yakshaveinc/snapcraft:core18
+ #
+ # env:
+ # SNAPCRAFT_LOGIN: ENCRYPTED[d8e82eb31c6372fec07f405f413d57806026b1a9f8400033531ebcd54d6750a5e4a8b1f68e3ec65c98c65e0d9b2a6a75]
+ # snapcraft_login_file:
+ # path: /root/.snapcraft/login.cfg
+ # variable_name: SNAPCRAFT_LOGIN
+ # snapcraft_script:
+ # - 'apt-get -y update'
+ # - 'snapcraft login --with "/root/.snapcraft/login.cfg"'
+ # - 'cd contrib/snapcraft && snapcraft && snapcraft push *.snap --release edge'
+
+
+docs_task:
+
+ # Only run this for PRs on mention, and after merge
only_if: >-
- $CIRRUS_BRANCH != $DEST_BRANCH &&
+ $CIRRUS_BRANCH == $DEST_BRANCH &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
$CIRRUS_CHANGE_MESSAGE =~ '.*CI:DOCS.*'
+
depends_on:
- "gating"
- - "vendor"
- #- "test_docs"
- #- "varlink_api"
- #- "build_each_commit"
- stub_script: '/bin/true'
-
-#publish_docs_task:
-#
-# only_if: $CIRRUS_BRANCH == $DEST_BRANCH &&
-# $CIRRUS_TAG =~ '^v[0-9]\.[0-9]\.[0-9]$'
+ script:
+ - "$SCRIPT_BASE/build_swagger.sh |& ${TIMESTAMP}"
+ - "$SCRIPT_BASE/upload_release_archive.sh |& ${TIMESTAMP}"
# Post message to IRC if everything passed PR testing
@@ -704,9 +705,8 @@ success_task:
- "special_testing_endpoint"
- "test_build_cache_images"
- "test_building_snap"
- - "upload_snap"
- "verify_test_built_images"
- - "test_docs"
+ - "docs"
env:
CIRRUS_WORKING_DIR: "/usr/src/libpod"
diff --git a/.copr/Makefile b/.copr/Makefile
index 465a52b15..ff99cd5c6 100644
--- a/.copr/Makefile
+++ b/.copr/Makefile
@@ -5,6 +5,8 @@ outdir := $(CURDIR)
topdir := $(CURDIR)/rpmbuild
SHORT_COMMIT ?= $(shell git rev-parse --short=8 HEAD)
+export GO111MODULE=off
+
srpm:
mkdir -p $(topdir)
sh $(current_dir)/prepare.sh
diff --git a/Makefile b/Makefile
index dabfc65dd..a7e779dd2 100644
--- a/Makefile
+++ b/Makefile
@@ -199,19 +199,17 @@ bin/podman.cross.%: .gopathok
GOARCH="$${TARGET##*.}" \
$(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags '$(BUILDTAGS_CROSS)' -o "$@" $(PROJECT)/cmd/podman
-.PHONY: service
-service: .gopathok
- $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/service
-
-.PHONY:
-run-service:
- systemd-socket-activate -l 8080 ./bin/service
-
.PHONY: run-docker-py-tests
run-docker-py-tests:
$(eval testLogs=$(shell mktemp))
./bin/podman run --rm --security-opt label=disable --privileged -v $(testLogs):/testLogs --net=host -e DOCKER_HOST=tcp://localhost:8080 $(DOCKERPY_IMAGE) sh -c "pytest $(DOCKERPY_TEST) "
+pkg/api/swagger.yaml: .gopathok release.txt
+ make -C pkg/api
+
+.PHONY: swagger
+swagger: pkg/api/swagger.yaml
+
clean: ## Clean artifacts
rm -rf \
.gopathok \
@@ -322,7 +320,7 @@ system.test-binary: .install.ginkgo
vagrant-check:
BOX=$(BOX) sh ./vagrant.sh
-binaries: varlink_generate podman podman-remote service ## Build podman
+binaries: varlink_generate podman podman-remote ## Build podman
install.catatonit:
./hack/install_catatonit.sh
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 7864b9232..0ef3e4322 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,31 @@
# Release Notes
+## 1.7.1
+### Features
+- Rootless Podman now uses RootlessKit for port forwarding, which should greatly improve performance and capabilities
+- The `podman untag` command has been added to remove tags from images without deleting them
+- The `podman service` command has been added to run an API server for managing Podman remotely
+- The `podman inspect` command on images now displays previous names they used
+- The `podman generate systemd` command now supports a `--new` option to generate service files that create and run new containers instead of managing existing containers
+- Support for `--log-opt tag=` to set logging tags has been added to the `journald` log driver
+- Added support for using Seccomp profiles embedded in images for `podman run` and `podman create` via the new `--seccomp-policy` CLI flag ([#4806](https://github.com/containers/libpod/pull/4806))
+
+### Bugfixes
+- Fixed a bug where the `podman cp` command would not copy the contents of directories when paths ending in `/.` were given ([#4717](https://github.com/containers/libpod/issues/4717))
+- Fixed a bug where the `podman play kube` command did not properly locate Seccomp profiles specified relative to localhost ([#4555](https://github.com/containers/libpod/issues/4555))
+- Fixed a bug where the `podman info` command for remote Podman did not show registry information ([#4793](https://github.com/containers/libpod/issues/4793))
+- Fixed a bug where the `podman exec` command did not support having input piped into it ([#3302](https://github.com/containers/libpod/issues/3302))
+- Fixed a bug where the `podman cp` command with rootless Podman on CGroups v2 systems did not properly determine if the container could be paused while copying ([#4813](https://github.com/containers/libpod/issues/4813))
+- Fixed a bug where the `podman container prune --force` command could possibly remove running containers if they were started while the command was running ([#4844](https://github.com/containers/libpod/issues/4844))
+- Fixed a bug where Podman, when run as root, would not properly configure `slirp4netns` networking when requested ([#4853](https://github.com/containers/libpod/pull/4853))
+- Fixed a bug where `podman run --userns=keep-id` did not work when the user had a UID over 65535 ([#4838](https://github.com/containers/libpod/issues/4838))
+- Fixed a bug where rootless `podman run` and `podman create` with the `--userns=keep-id` option could change permissions on `/run/user/$UID` and break KDE ([#4846](https://github.com/containers/libpod/issues/4846))
+
+### Misc
+- Initial work on version 2 of the Podman remote API has been merged, but is still in an alpha state and not ready for use. Read more [here](https://podman.io/releases/2020/01/17/podman-new-api.html)
+- Updated vendored Buildah to v1.13.1
+- Updated vendored containers/storage to v1.15.5
+
## 1.7.0
### Features
- Added support for setting a static MAC address for containers
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index b261599e6..6bc8aa4a3 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -599,6 +599,12 @@ type VarlinkValues struct {
Timeout int64
}
+type ServiceValues struct {
+ PodmanCommand
+ Varlink bool
+ Timeout int64
+}
+
type SetTrustValues struct {
PodmanCommand
PolicyPath string
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index 31f1b3ba4..ebd7aeb0c 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -26,6 +26,9 @@ func getMainCommands() []*cobra.Command {
if len(_varlinkCommand.Use) > 0 {
rootCommands = append(rootCommands, _varlinkCommand)
}
+ if len(_serviceCommand.Use) > 0 {
+ rootCommands = append(rootCommands, _serviceCommand)
+ }
return rootCommands
}
diff --git a/cmd/podman/service.go b/cmd/podman/service.go
new file mode 100644
index 000000000..6e2b4a366
--- /dev/null
+++ b/cmd/podman/service.go
@@ -0,0 +1,154 @@
+// +build varlink,!remoteclient
+
+package main
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ iopodman "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter"
+ api "github.com/containers/libpod/pkg/api/server"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/pkg/varlinkapi"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/varlink/go/varlink"
+)
+
+var (
+ serviceCommand cliconfig.ServiceValues
+ serviceDescription = `Run an API service
+
+Enable a listening service for API access to Podman commands.
+`
+
+ _serviceCommand = &cobra.Command{
+ Use: "service [flags] [URI]",
+ Short: "Run API service",
+ Long: serviceDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ serviceCommand.InputArgs = args
+ serviceCommand.GlobalFlags = MainGlobalOpts
+ return serviceCmd(&serviceCommand)
+ },
+ }
+)
+
+func init() {
+ serviceCommand.Command = _serviceCommand
+ serviceCommand.SetHelpTemplate(HelpTemplate())
+ serviceCommand.SetUsageTemplate(UsageTemplate())
+ flags := serviceCommand.Flags()
+ flags.Int64VarP(&serviceCommand.Timeout, "timeout", "t", 1000, "Time until the service session expires in milliseconds. Use 0 to disable the timeout")
+ flags.BoolVar(&serviceCommand.Varlink, "varlink", false, "Use legacy varlink service instead of REST")
+}
+
+func serviceCmd(c *cliconfig.ServiceValues) error {
+ // For V2, default to the REST socket
+ apiURI := adapter.DefaultAPIAddress
+ if c.Varlink {
+ apiURI = adapter.DefaultVarlinkAddress
+ }
+
+ if rootless.IsRootless() {
+ xdg, err := util.GetRuntimeDir()
+ if err != nil {
+ return err
+ }
+ socketName := "podman.sock"
+ if c.Varlink {
+ socketName = "io.podman"
+ }
+ socketDir := filepath.Join(xdg, "podman", socketName)
+ if _, err := os.Stat(filepath.Dir(socketDir)); err != nil {
+ if os.IsNotExist(err) {
+ if err := os.Mkdir(filepath.Dir(socketDir), 0755); err != nil {
+ return err
+ }
+ } else {
+ return err
+ }
+ }
+ apiURI = fmt.Sprintf("unix:%s", socketDir)
+ }
+
+ if len(c.InputArgs) > 0 {
+ apiURI = c.InputArgs[0]
+ }
+
+ logrus.Infof("using API endpoint: %s", apiURI)
+
+ // Create a single runtime for API consumption
+ runtime, err := libpodruntime.GetRuntimeDisableFDs(getContext(), &c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.DeferredShutdown(false)
+
+ timeout := time.Duration(c.Timeout) * time.Millisecond
+ if c.Varlink {
+ return runVarlink(runtime, apiURI, timeout, c)
+ }
+ return runREST(runtime, apiURI, timeout)
+}
+
+func runREST(r *libpod.Runtime, uri string, timeout time.Duration) error {
+ logrus.Warn("This function is EXPERIMENTAL")
+ fmt.Println("This function is EXPERIMENTAL.")
+ fields := strings.Split(uri, ":")
+ if len(fields) == 1 {
+ return errors.Errorf("%s is an invalid socket destination", uri)
+ }
+ address := strings.Join(fields[1:], ":")
+ l, err := net.Listen(fields[0], address)
+ if err != nil {
+ return errors.Wrapf(err, "unable to create socket %s", uri)
+ }
+ server, err := api.NewServerWithSettings(r, timeout, &l)
+ if err != nil {
+ return err
+ }
+ return server.Serve()
+}
+
+func runVarlink(r *libpod.Runtime, uri string, timeout time.Duration, c *cliconfig.ServiceValues) error {
+ var varlinkInterfaces = []*iopodman.VarlinkInterface{varlinkapi.New(&c.PodmanCommand, r)}
+ service, err := varlink.NewService(
+ "Atomic",
+ "podman",
+ version.Version,
+ "https://github.com/containers/libpod",
+ )
+ if err != nil {
+ return errors.Wrapf(err, "unable to create new varlink service")
+ }
+
+ for _, i := range varlinkInterfaces {
+ if err := service.RegisterInterface(i); err != nil {
+ return errors.Errorf("unable to register varlink interface %v", i)
+ }
+ }
+
+ // Run the varlink server at the given address
+ if err = service.Listen(uri, timeout); err != nil {
+ switch err.(type) {
+ case varlink.ServiceTimeoutError:
+ logrus.Infof("varlink service expired (use --timeout to increase session time beyond %d ms, 0 means never timeout)", timeout.String())
+ return nil
+ default:
+ return errors.Wrapf(err, "unable to start varlink service")
+ }
+ }
+ return nil
+}
diff --git a/cmd/podman/service_dummy.go b/cmd/podman/service_dummy.go
new file mode 100644
index 000000000..a774c34de
--- /dev/null
+++ b/cmd/podman/service_dummy.go
@@ -0,0 +1,11 @@
+// +build !varlink
+
+package main
+
+import "github.com/spf13/cobra"
+
+var (
+ _serviceCommand = &cobra.Command{
+ Use: "",
+ }
+)
diff --git a/cmd/podman/tag.go b/cmd/podman/tag.go
index eb43d695c..215b716b8 100644
--- a/cmd/podman/tag.go
+++ b/cmd/podman/tag.go
@@ -12,7 +12,7 @@ var (
tagDescription = "Adds one or more additional names to locally-stored image."
_tagCommand = &cobra.Command{
- Use: "tag [flags] IMAGE TAG [TAG...]",
+ Use: "tag [flags] IMAGE TARGET_NAME [TARGET_NAME...]",
Short: "Add an additional name to a local image",
Long: tagDescription,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -51,7 +51,7 @@ func tagCmd(c *cliconfig.TagValues) error {
for _, tagName := range args[1:] {
if err := newImage.TagImage(tagName); err != nil {
- return errors.Wrapf(err, "error adding '%s' to image %q", tagName, newImage.InputName)
+ return errors.Wrapf(err, "error adding %q to image %q", tagName, newImage.InputName)
}
}
return nil
diff --git a/cmd/podman/varlink.go b/cmd/podman/varlink.go
index cd21e3574..047d94fc2 100644
--- a/cmd/podman/varlink.go
+++ b/cmd/podman/varlink.go
@@ -51,7 +51,7 @@ func init() {
}
func varlinkCmd(c *cliconfig.VarlinkValues) error {
- varlinkURI := adapter.DefaultAddress
+ varlinkURI := adapter.DefaultVarlinkAddress
if rootless.IsRootless() {
xdg, err := util.GetRuntimeDir()
if err != nil {
diff --git a/cmd/service/main.go b/cmd/service/main.go
deleted file mode 100644
index 0290de892..000000000
--- a/cmd/service/main.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
- "os"
-
- "github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- api "github.com/containers/libpod/pkg/api/server"
- "github.com/containers/storage/pkg/reexec"
- log "github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
-)
-
-func initConfig() {
- // we can do more stuff in here.
-}
-
-func main() {
- if reexec.Init() {
- // We were invoked with a different argv[0] indicating that we
- // had a specific job to do as a subprocess, and it's done.
- return
- }
-
- cobra.OnInitialize(initConfig)
- log.SetLevel(log.DebugLevel)
-
- config := cliconfig.PodmanCommand{
- Command: &cobra.Command{},
- InputArgs: []string{},
- GlobalFlags: cliconfig.MainFlags{},
- Remote: false,
- }
- // Create a single runtime for http
- runtime, err := libpodruntime.GetRuntimeDisableFDs(context.Background(), &config)
- if err != nil {
- fmt.Printf("error creating libpod runtime: %s", err.Error())
- os.Exit(1)
- }
- defer runtime.DeferredShutdown(false)
-
- server, err := api.NewServer(runtime)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
-
- err = server.Serve()
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
-}
diff --git a/commands.md b/commands.md
index b744b702e..17e069cb1 100644
--- a/commands.md
+++ b/commands.md
@@ -73,6 +73,7 @@
| [podman-rmi(1)](/docs/source/markdown/podman-rmi.1.md) | Removes one or more images |
| [podman-run(1)](/docs/source/markdown/podman-run.1.md) | Run a command in a container |
| [podman-save(1)](/docs/source/markdown/podman-save.1.md) | Saves an image to an archive |
+| [podman-service(1)](/docs/source/markdown/podman-service.1.md) | Run an API listening service |
| [podman-search(1)](/docs/source/markdown/podman-search.1.md) | Search a registry for an image |
| [podman-start(1)](/docs/source/markdown/podman-start.1.md) | Starts one or more containers |
| [podman-stats(1)](/docs/source/markdown/podman-stats.1.md) | Display a live stream of one or more containers' resource usage statistics |
diff --git a/completions/bash/podman b/completions/bash/podman
index ca3618b0b..57b9547a7 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -1755,6 +1755,19 @@ _podman_search() {
_complete_ "$options_with_args" "$boolean_options"
}
+_podman_service() {
+ local options_with_args="
+ -t
+ --timeout
+ "
+ local boolean_options="
+ --help
+ -h
+ --varlink
+ "
+ _complete_ "$options_with_args" "$boolean_options"
+}
+
_podman_unmount() {
_podman_umount $@
}
diff --git a/contrib/cirrus/build_swagger.sh b/contrib/cirrus/build_swagger.sh
new file mode 100755
index 000000000..0471f0c10
--- /dev/null
+++ b/contrib/cirrus/build_swagger.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+source $(dirname $0)/lib.sh
+
+# Building this is a PITA, just grab binary for use in automation
+# Ref: https://goswagger.io/install.html#static-binary
+download_url=$(curl -s https://api.github.com/repos/go-swagger/go-swagger/releases/latest | \
+ jq -r '.assets[] | select(.name | contains("'"$(uname | tr '[:upper:]' '[:lower:]')"'_amd64")) | .browser_download_url')
+curl -o /usr/local/bin/swagger -L'#' "$download_url"
+chmod +x /usr/local/bin/swagger
+
+cd $GOSRC
+make swagger
+echo "Preserving build details for later use."
+mv -v release.txt actual_release.txt # Another 'make' during testing could overwrite it
diff --git a/contrib/cirrus/upload_release_archive.sh b/contrib/cirrus/upload_release_archive.sh
index eb7742375..a94a5cc82 100755
--- a/contrib/cirrus/upload_release_archive.sh
+++ b/contrib/cirrus/upload_release_archive.sh
@@ -9,6 +9,8 @@ req_env_var CI UPLDREL_IMAGE CIRRUS_BUILD_ID GOSRC RELEASE_GCPJSON RELEASE_GCPNA
[[ "$CI" == "true" ]] || \
die 56 "$0 must be run under Cirrus-CI to function"
+SWAGGER_FILEPATH="pkg/api/swagger.yaml"
+
# We store "releases" for each PR, mostly to validate the process is functional
unset PR_OR_BRANCH BUCKET
if [[ -n "$CIRRUS_PR" ]]
@@ -17,8 +19,8 @@ then
BUCKET="libpod-pr-releases"
elif [[ -n "$CIRRUS_BRANCH" ]]
then
- # Only release non-development tagged commit ranges
- if is_release
+ # Only release binaries for tagged commit ranges, unless working on docs
+ if is_release || [[ $CIRRUS_TASK_NAME =~ "docs" ]]
then
PR_OR_BRANCH="$CIRRUS_BRANCH"
BUCKET="libpod-$CIRRUS_BRANCH-releases"
@@ -62,7 +64,7 @@ echo "$RELEASE_GCPJSON" > "$TMPF"
unset RELEASE_GCPJSON
cd $GOSRC
-for filename in $(ls -1 *.tar.gz *.zip *.msi)
+for filename in $(ls -1 *.tar.gz *.zip *.msi $SWAGGER_FILEPATH)
do
unset EXT
EXT=$(echo "$filename" | sed -r -e 's/.+\.(.+$)/\1/g')
@@ -76,24 +78,32 @@ do
EXT="tar.gz"
fi
- [[ "$OS_RELEASE_ID" == "ubuntu" ]] || \
- chcon -t container_file_t "$filename"
- # Form the generic "latest" file for this branch or pr
- TO_PREFIX="${RELEASE_BASENAME}-latest-${PR_OR_BRANCH}-${RELEASE_DIST}"
- # Form the fully-versioned filename for historical sake
- ALSO_PREFIX="${RELEASE_BASENAME}-${RELEASE_VERSION}-${PR_OR_BRANCH}-${RELEASE_DIST}"
- TO_SUFFIX="${RELEASE_ARCH}.${EXT}"
- if [[ "$RELEASE_DIST" == "windows" ]] || [[ "$RELEASE_DIST" == "darwin" ]]
+ if [[ $filename == $SWAGGER_FILEPATH ]]
then
- TO_FILENAME="${TO_PREFIX}-${TO_SUFFIX}"
- ALSO_FILENAME="${ALSO_PREFIX}-${TO_SUFFIX}"
+ # Support other tools referencing branch and/or version-specific refs.
+ TO_FILENAME="swagger-${RELEASE_VERSION}-${PR_OR_BRANCH}.yaml"
+ # For documentation references this must always be a static filename, e.g. swagger-latest-master.yaml
+ ALSO_FILENAME="swagger-latest-${PR_OR_BRANCH}.yaml"
else
- TO_FILENAME="${TO_PREFIX}-${RELEASE_DIST_VER}-${TO_SUFFIX}"
- ALSO_FILENAME="${ALSO_PREFIX}-${TO_SUFFIX}"
+ # Form the generic "latest" file for this branch or pr
+ TO_PREFIX="${RELEASE_BASENAME}-latest-${PR_OR_BRANCH}-${RELEASE_DIST}"
+ # Form the fully-versioned filename for historical sake
+ ALSO_PREFIX="${RELEASE_BASENAME}-${RELEASE_VERSION}-${PR_OR_BRANCH}-${RELEASE_DIST}"
+ TO_SUFFIX="${RELEASE_ARCH}.${EXT}"
+ if [[ "$RELEASE_DIST" == "windows" ]] || [[ "$RELEASE_DIST" == "darwin" ]]
+ then
+ TO_FILENAME="${TO_PREFIX}-${TO_SUFFIX}"
+ ALSO_FILENAME="${ALSO_PREFIX}-${TO_SUFFIX}"
+ else
+ TO_FILENAME="${TO_PREFIX}-${RELEASE_DIST_VER}-${TO_SUFFIX}"
+ ALSO_FILENAME="${ALSO_PREFIX}-${TO_SUFFIX}"
+ fi
fi
+ [[ "$OS_RELEASE_ID" == "ubuntu" ]] || \
+ chcon -t container_file_t "$filename"
+
echo "Running podman ... $UPLDREL_IMAGE for $filename -> $TO_FILENAME"
- echo "Warning: upload failures are completely ignored, avoiding any needless holdup of PRs."
podman run -i --rm \
-e "GCPNAME=$RELEASE_GCPNAME" \
-e "GCPPROJECT=$RELEASE_GCPROJECT" \
@@ -105,5 +115,5 @@ do
-e "BUCKET=$BUCKET" \
-v "$TMPF:$TMPF:ro" \
-v "$(realpath $GOSRC/$filename):/tmp/$filename:ro" \
- $UPLDREL_IMAGE || true
+ $UPLDREL_IMAGE
done
diff --git a/contrib/upldrel/entrypoint.sh b/contrib/upldrel/entrypoint.sh
index b79f1c5bf..dc0e69676 100755
--- a/contrib/upldrel/entrypoint.sh
+++ b/contrib/upldrel/entrypoint.sh
@@ -4,7 +4,7 @@ set -e
source /usr/local/bin/lib_entrypoint.sh
-req_env_var GCPJSON_FILEPATH GCPNAME GCPPROJECT BUCKET FROM_FILEPATH TO_FILENAME ALSO_FILENAME
+req_env_var GCPJSON_FILEPATH GCPNAME GCPPROJECT BUCKET FROM_FILEPATH TO_FILENAME
[[ -r "$FROM_FILEPATH" ]] || \
die 2 ERROR Cannot read release archive file: "$FROM_FILEPATH"
diff --git a/docs/source/Reference.rst b/docs/source/Reference.rst
index 9a771c87f..c6a131bc7 100644
--- a/docs/source/Reference.rst
+++ b/docs/source/Reference.rst
@@ -1,2 +1,4 @@
Reference
=========
+
+Check out our new in-development `API documentation <_static/api.html>`_
diff --git a/docs/source/_static/api.html b/docs/source/_static/api.html
new file mode 100644
index 000000000..08f55b620
--- /dev/null
+++ b/docs/source/_static/api.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>ReDoc</title>
+ <!-- needed for adaptive design -->
+ <meta charset="utf-8"/>
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
+
+ <!--
+ ReDoc doesn't change outer page styles
+ -->
+ <style>
+ body {
+ margin: 0;
+ padding: 0;
+ }
+ </style>
+ </head>
+ <body>
+ <redoc spec-url='https://storage.googleapis.com/libpod-master-releases/swagger-latest-master.yaml'></redoc>
+ <script src="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"> </script>
+ </body>
+</html>
diff --git a/docs/source/markdown/podman-rmi.1.md b/docs/source/markdown/podman-rmi.1.md
index d911ee6cb..3c46bc32c 100644
--- a/docs/source/markdown/podman-rmi.1.md
+++ b/docs/source/markdown/podman-rmi.1.md
@@ -29,7 +29,7 @@ podman rmi c0ed59d05ff7
Remove an image and its associated containers.
```
podman rmi --force imageID
-````
+```
Remove multiple images by their shortened IDs.
```
diff --git a/docs/source/markdown/podman-service.1.md b/docs/source/markdown/podman-service.1.md
new file mode 100644
index 000000000..5c55e20d3
--- /dev/null
+++ b/docs/source/markdown/podman-service.1.md
@@ -0,0 +1,47 @@
+% podman-service(1)
+
+## NAME
+podman\-service - Run an API service
+
+## SYNOPSIS
+**podman service** [*options*]
+
+## DESCRIPTION
+The **podman service** command creates a listening service that will answer API calls for Podman. You may
+optionally provide an endpoint for the API in URI form. For example, *unix://tmp/foobar.sock* or *tcp:localhost:8080*.
+If no endpoint is provided, defaults will be used. The default endpoint for a rootfull
+service is *unix:/run/podman/podman.sock*; for a rootless service it is *unix:/$XDG_RUNTIME_DIR/podman/podman.sock* (for
+example, *unix:/run/user/1000/podman/podman.sock*).
+
+## OPTIONS
+
+**--timeout**, **-t**
+
+The time until the session expires in _milliseconds_. The default is 1
+second. A value of `0` means no timeout and the session will not expire.
+
+**--varlink**
+
+Use the varlink protocol instead of the REST-based protocol. This option will be deprecated in the future.
+
+**--help**, **-h**
+
+Print usage statement.
+
+## EXAMPLES
+
+Run the API service for 5 seconds using the default socket.
+```
+podman service --timeout 5000
+```
+
+Run the podman varlink service with an alternate URI and accept the default timeout.
+```
+$ podman service --varlink unix:/tmp/io.podman
+```
+
+## SEE ALSO
+podman(1), podman-varlink(1)
+
+## HISTORY
+January 2020, Originally compiled by Brent Baude <bbaude@redhat.com>
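
Once `podman service` is listening on the default rootfull socket described above, any HTTP client that can dial a Unix socket can reach it. The Go sketch below is illustrative only and not part of this diff: the `/run/podman/podman.sock` path comes from the man page above, while the `/_ping` request path is a placeholder assumption, not an endpoint documented here.

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Route all HTTP traffic over the rootfull service socket documented above.
	transport := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/run/podman/podman.sock")
		},
	}
	client := &http.Client{Transport: transport}

	// The host below is ignored (the dialer overrides it); the path is a
	// placeholder endpoint, not something defined by this man page.
	resp, err := client.Get("http://d/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, string(body))
}
```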
diff --git a/docs/source/markdown/podman-tag.1.md b/docs/source/markdown/podman-tag.1.md
index 291d95228..064ea401d 100644
--- a/docs/source/markdown/podman-tag.1.md
+++ b/docs/source/markdown/podman-tag.1.md
@@ -4,14 +4,15 @@
podman\-tag - Add an additional name to a local image
## SYNOPSIS
-**podman tag** *image*[:*tag*] *target-name*[:*tag*] [*options*]
+**podman tag** *image*[:*tag*] [*target-name*[:*tag*]...] [*options*]
-**podman image tag** *image*[:*tag*] *target-name*[:*tag*] [*options*]
+**podman image tag** *image*[:*tag*] [*target-name*[:*tag*]...] [*options*]
## DESCRIPTION
-Assigns a new alias to an image. An alias refers to the entire image name, including the optional
-*tag* after the `:`. If you do not provide *tag*, podman will default to `latest` for both
-the *image* and the *target-name*.
+Assigns a new image name to an existing image. A full name refers to the entire
+image name, including the optional *tag* after the `:`. If there is no *tag*
+provided, then podman will default to `latest` for both the *image* and the
+*target-name*.
## OPTIONS
@@ -32,4 +33,5 @@ $ podman tag httpd myregistryhost:5000/fedora/httpd:v2
podman(1)
## HISTORY
+December 2019, Update description to refer to 'name' instead of 'alias' by Sascha Grunert <sgrunert@suse.com>
July 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md
index 0c9ec3d1c..6e0eff045 100644
--- a/docs/source/markdown/podman.1.md
+++ b/docs/source/markdown/podman.1.md
@@ -191,6 +191,7 @@ the exit codes follow the `chroot` standard, see below:
| [podman-rmi(1)](podman-rmi.1.md) | Removes one or more locally stored images. |
| [podman-run(1)](podman-run.1.md) | Run a command in a new container. |
| [podman-save(1)](podman-save.1.md) | Save an image to a container archive. |
+| [podman-service(1)](podman-service.1.md) | Run an API service. |
| [podman-search(1)](podman-search.1.md) | Search a registry for an image. |
| [podman-start(1)](podman-start.1.md) | Start one or more containers. |
| [podman-stats(1)](podman-stats.1.md) | Display a live stream of one or more container's resource usage statistics. |
diff --git a/go.mod b/go.mod
index dff9ab4f3..54af7fda4 100644
--- a/go.mod
+++ b/go.mod
@@ -11,6 +11,7 @@ require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/plugins v0.8.2
github.com/containers/buildah v1.13.1
+ github.com/containers/conmon v2.0.9+incompatible
github.com/containers/image/v5 v5.1.0
github.com/containers/psgo v1.4.0
github.com/containers/storage v1.15.5
@@ -49,7 +50,6 @@ require (
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.3.0
- github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible // indirect
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.9.0
github.com/pkg/profile v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index f3613ce2e..de7791f47 100644
--- a/go.sum
+++ b/go.sum
@@ -81,6 +81,8 @@ github.com/containers/common v0.0.3 h1:C2Zshb0w720FqPa42MCRuiGfbW0kwbURRwvK1EWIC
github.com/containers/common v0.0.3/go.mod h1:CaOgMRiwi2JJHISMZ6VPPZhQYFUDRv3YYVss2RqUCMg=
github.com/containers/common v0.0.7 h1:eKYZLKfJ2d/RNDgecLDFv45cHb4imYzIcrQHx1Y029M=
github.com/containers/common v0.0.7/go.mod h1:lhWV3MLhO1+KGE2x6v9+K38MxpjXGso+edmpkFnCOqI=
+github.com/containers/conmon v2.0.9+incompatible h1:YcEgk0Ny1WBdH35M2LKe2cG6FiQqzDdVaURw84XvS7A=
+github.com/containers/conmon v2.0.9+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
github.com/containers/image/v5 v5.1.0 h1:5FjAvPJniamuNNIQHkh4PnsL+n+xzs6Aonzaz5dqTEo=
@@ -389,6 +391,7 @@ github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4Ft
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o=
github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s=
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY=
diff --git a/hack/install_golangci.sh b/hack/install_golangci.sh
index 9d23bbd31..430685a71 100644..100755
--- a/hack/install_golangci.sh
+++ b/hack/install_golangci.sh
@@ -1,5 +1,17 @@
-#!/bin/bash -e
+#!/bin/bash
+
+if [ -z "$VERSION" ]; then
+ echo \$VERSION is empty
+ exit 1
+fi
+
+if [ -z "$GOBIN" ]; then
+ echo \$GOBIN is empty
+ exit 1
+fi
+
$GOBIN/golangci-lint --version | grep $VERSION
if [ $? -ne 0 ]; then
+ set -e
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOBIN v$VERSION
fi
diff --git a/install.md b/install.md
index 5771fff45..35a931c85 100644
--- a/install.md
+++ b/install.md
@@ -10,6 +10,40 @@ sudo pacman -S podman
If you have problems when running Podman in [rootless](README.md#rootless) mode follow the instructions [here](https://wiki.archlinux.org/index.php/Linux_Containers#Enable_support_to_run_unprivileged_containers_(optional))
+#### [Debian](https://debian.org)
+
+The libpod package is [being worked on](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930440)
+for inclusion in the default Debian repos. Relevant status updates can also be
+found [here](https://github.com/containers/libpod/issues/1742).
+
+Alternatively, the [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable)
+provides packages for Debian 10, testing and unstable.
+
+```bash
+# Debian Unstable/Sid
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O Release.key
+
+# Debian Testing
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O Release.key
+
+# Debian 10
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O Release.key
+
+sudo apt-key add - < Release.key
+sudo apt-get update -qq
+sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
+```
+
+There are many [packages](https://packages.debian.org/search?keywords=libpod&searchon=names&suite=stable&section=all)
+with the libpod prefix available already on Debian. However, those are
+unrelated to this project.
+
+
#### [Fedora](https://www.fedoraproject.org), [CentOS](https://www.centos.org)
```bash
@@ -44,6 +78,23 @@ sudo zypper install podman
Built-in, no need to install
+
+#### [Raspbian](https://raspbian.org)
+
+The Kubic project provides packages for Raspbian 10.
+
+```bash
+# Raspbian 10
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O Release.key
+sudo apt-key add - < Release.key
+sudo apt-get update -qq
+sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
+```
+
+
#### [RHEL7](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)
Subscribe, then enable Extras channel and install Podman.
@@ -60,22 +111,59 @@ sudo yum module enable -y container-tools:1.0
sudo yum module install -y container-tools:1.0
```
-### Installing development versions of Podman
#### [Ubuntu](https://www.ubuntu.com)
-The latest builds are available in a PPA. Take note of the [Build and Run Dependencies](#build-and-run-dependencies) listed below if you run into any issues.
+The Kubic project provides packages for Ubuntu 18.04, 19.04 and 19.10.
```bash
+. /etc/os-release
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O Release.key
+sudo apt-key add - < Release.key
sudo apt-get update -qq
-sudo apt-get install -qq -y software-properties-common uidmap slirp4netns
-sudo add-apt-repository -y ppa:projectatomic/ppa
+sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
+```
+
+There are many [packages](https://packages.ubuntu.com/search?keywords=libpod&searchon=names&suite=eoan&section=all)
+with the libpod prefix available already on Ubuntu. However, those are
+unrelated to this project.
+
+
+### Installing development versions of Podman
+
+#### Debian
+
+The Kubic project provides RC/testing packages for Debian 10, testing and
+unstable.
+
+```bash
+# Debian Unstable/Sid
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/Debian_Unstable/Release.key -O Release.key
+
+# Debian Testing
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/Debian_Testing/Release.key -O Release.key
+
+# Debian 10
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/Debian_10/Release.key -O Release.key
+
+# Raspbian 10
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/Raspbian_10/Release.key -O Release.key
+
+sudo apt-key add - < Release.key
sudo apt-get update -qq
sudo apt-get -qq -y install podman
sudo mkdir -p /etc/containers
echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
```
+
#### Fedora
You can test the very latest Podman in Fedora's `updates-testing`
@@ -89,6 +177,39 @@ If you use a newer Podman package from Fedora's `updates-testing`, we would
appreciate your `+1` feedback in [Bodhi, Fedora's update management
system](https://bodhi.fedoraproject.org/updates/?packages=podman).
+
+#### [Raspbian](https://raspbian.org)
+
+The Kubic project provides RC/testing packages for Raspbian 10.
+
+```bash
+# Raspbian 10
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/Raspbian_10/Release.key -O Release.key
+sudo apt-key add - < Release.key
+sudo apt-get update -qq
+sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
+```
+
+
+#### Ubuntu
+
+The Kubic project provides RC/testing packages for Ubuntu 18.04, 19.04 and 19.10.
+
+```bash
+. /etc/os-release
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list"
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:testing/x${NAME}_${VERSION_ID}/Release.key -O Release.key
+sudo apt-key add - < Release.key
+sudo apt-get update -qq
+sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
+```
+
+
## Building from scratch
### Build and Run Dependencies
diff --git a/libpod/container_api.go b/libpod/container_api.go
index e36623529..d74a14f15 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -5,6 +5,7 @@ import (
"context"
"io"
"io/ioutil"
+ "net"
"os"
"time"
@@ -374,7 +375,9 @@ type AttachStreams struct {
AttachInput bool
}
-// Attach attaches to a container
+// Attach attaches to a container.
+// This function returns when the attach finishes. It does not hold the lock for
+// the duration of its runtime, only using it at the beginning to verify state.
func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize) error {
if !c.batched {
c.lock.Lock()
@@ -382,6 +385,7 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
c.lock.Unlock()
return err
}
+ // We are NOT holding the lock for the duration of the function.
c.lock.Unlock()
}
@@ -389,10 +393,71 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
}
- defer c.newContainerEvent(events.Attach)
+ c.newContainerEvent(events.Attach)
return c.attach(streams, keys, resize, false, nil)
}
+// HTTPAttach forwards an attach session over a hijacked HTTP session.
+// HTTPAttach will consume and close the included httpCon, which is expected to
+// be sourced from a hijacked HTTP connection.
+// The cancel channel is optional, and can be used to asynchronously cancel the
+// attach session.
+// The streams variable is only supported if the container was not a terminal,
+// and allows specifying which of the container's standard streams will be
+// forwarded to the client.
+// This function returns when the attach finishes. It does not hold the lock for
+// the duration of its runtime, only using it at the beginning to verify state.
+func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error {
+ if !c.batched {
+ c.lock.Lock()
+ if err := c.syncContainer(); err != nil {
+ c.lock.Unlock()
+
+ // Write any errors to the HTTP buffer before we close.
+ hijackWriteErrorAndClose(err, c.ID(), httpCon, httpBuf)
+
+ return err
+ }
+ // We are NOT holding the lock for the duration of the function.
+ c.lock.Unlock()
+ }
+
+ if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
+ toReturn := errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+
+ // Write any errors to the HTTP buffer before we close.
+ hijackWriteErrorAndClose(toReturn, c.ID(), httpCon, httpBuf)
+
+ return toReturn
+ }
+
+ logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
+
+ c.newContainerEvent(events.Attach)
+ return c.ociRuntime.HTTPAttach(c, httpCon, httpBuf, streams, detachKeys, cancel)
+}
+
+// AttachResize resizes the container's terminal, which is displayed by Attach
+// and HTTPAttach.
+func (c *Container) AttachResize(newSize remotecommand.TerminalSize) error {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ }
+
+ if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only resize created or running containers")
+ }
+
+ logrus.Infof("Resizing TTY of container %s", c.ID())
+
+ return c.ociRuntime.AttachResize(c, newSize)
+}
+
// Mount mounts a container's filesystem on the host
// The path where the container has been mounted is returned
func (c *Container) Mount() (string, error) {
diff --git a/libpod/oci.go b/libpod/oci.go
index 05a2f37db..2ea61851f 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -1,6 +1,9 @@
package libpod
import (
+ "bufio"
+ "net"
+
"k8s.io/client-go/tools/remotecommand"
)
@@ -47,6 +50,23 @@ type OCIRuntime interface {
// UnpauseContainer unpauses the given container.
UnpauseContainer(ctr *Container) error
+ // HTTPAttach performs an attach intended to be transported over HTTP.
+ // For terminal attach, the container's output will be directly streamed
+ // to output; otherwise, STDOUT and STDERR will be multiplexed, with
+ // a header prepended as follows: 1-byte STREAM (0, 1, 2 for STDIN,
+ // STDOUT, STDERR), 3 null (0x00) bytes, 4-byte big endian length.
+ // If a cancel channel is provided, it can be used to asynchronously
+ // terminate the attach session. Detach keys, if given, will also cause
+ // the attach session to be terminated if provided via the STDIN
+ // channel. If they are not provided, the default detach keys will be
+ // used instead. Detach keys of "" will disable detaching via keyboard.
+ // The streams parameter may be passed for containers that did not
+ // create a terminal and will determine which streams to forward to the
+ // client.
+ HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error
+ // AttachResize resizes the terminal in use by the given container.
+ AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
+
// ExecContainer executes a command in a running container.
// Returns an int (exit code), error channel (errors from attach), and
// error (errors that occurred attempting to start the exec session).
@@ -130,3 +150,12 @@ type ExecOptions struct {
// detach from the container.
DetachKeys string
}
+
+// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's
+// standard streams should be streamed to the client. If this is passed, at
+// least one of the streams must be set to true.
+type HTTPAttachStreams struct {
+ Stdin bool
+ Stdout bool
+ Stderr bool
+}
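
The interface comment above fixes the wire format for non-terminal HTTP attach: a 1-byte stream ID (0, 1, 2 for STDIN, STDOUT, STDERR), three null bytes, and a 4-byte big-endian payload length, followed by the payload itself. As an illustration only (not part of this diff), a minimal client-side demultiplexer for that framing could look like the sketch below; the package, function name, and writers are hypothetical, while the header layout comes from the comment above.

```go
package attachdemo

import (
	"encoding/binary"
	"io"
)

// demuxAttachStream reads the multiplexed attach stream described in the
// OCIRuntime.HTTPAttach comment and routes payloads to stdout/stderr.
func demuxAttachStream(src io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(src, header); err != nil {
			if err == io.EOF {
				return nil // clean end of the attach stream
			}
			return err
		}
		// Bytes 4-7 carry the big-endian payload length.
		payload := make([]byte, binary.BigEndian.Uint32(header[4:8]))
		if _, err := io.ReadFull(src, payload); err != nil {
			return err
		}
		switch header[0] {
		case 1: // STDOUT
			if _, err := stdout.Write(payload); err != nil {
				return err
			}
		case 2: // STDERR
			if _, err := stderr.Write(payload); err != nil {
				return err
			}
		default:
			// 0 is STDIN; a read-side client can simply skip it.
		}
	}
}
```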
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 7cd42f9ab..722012386 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -5,8 +5,11 @@ package libpod
import (
"bufio"
"bytes"
+ "encoding/binary"
"fmt"
+ "io"
"io/ioutil"
+ "net"
"os"
"os/exec"
"path/filepath"
@@ -17,6 +20,7 @@ import (
"text/template"
"time"
+ conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
@@ -33,6 +37,13 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+const (
+ // This is Conmon's STDIO_BUF_SIZE. I don't believe we have access to it
+ // directly from the Go code, so it is defined as a constant here.
+ bufferSize = conmonConfig.BufSize
)
// ConmonOCIRuntime is an OCI runtime managed by Conmon.
@@ -465,6 +476,123 @@ func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error {
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "resume", ctr.ID())
}
+// HTTPAttach performs an attach for the HTTP API.
+// This will consume, and automatically close, the hijacked HTTP session.
+// It is not necessary to close it independently.
+// The cancel channel is not closed; it is up to the caller to do so after
+// this function returns.
+// If this is a container with a terminal, we will stream raw. If it is not, we
+// will stream with an 8-byte header to multiplex STDOUT and STDERR.
+func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) {
+ isTerminal := false
+ if ctr.config.Spec.Process != nil {
+ isTerminal = ctr.config.Spec.Process.Terminal
+ }
+
+ // Ensure that our contract of closing the HTTP connection is honored.
+ defer hijackWriteErrorAndClose(deferredErr, ctr.ID(), httpConn, httpBuf)
+
+ if streams != nil {
+ if isTerminal {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot specify which streams to attach as container %s has a terminal", ctr.ID())
+ }
+ if !streams.Stdin && !streams.Stdout && !streams.Stderr {
+ return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to")
+ }
+ }
+
+ attachSock, err := r.AttachSocketPath(ctr)
+ if err != nil {
+ return err
+ }
+ socketPath := buildSocketPath(attachSock)
+
+ conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close container %s attach socket: %q", ctr.ID(), err)
+ }
+ }()
+
+ logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath)
+
+ detachString := define.DefaultDetachKeys
+ if detachKeys != nil {
+ detachString = *detachKeys
+ }
+ detach, err := processDetachKeys(detachString)
+ if err != nil {
+ return err
+ }
+
+ // Make a channel to pass errors back
+ errChan := make(chan error)
+
+ attachStdout := true
+ attachStderr := true
+ attachStdin := true
+ if streams != nil {
+ attachStdout = streams.Stdout
+ attachStderr = streams.Stderr
+ attachStdin = streams.Stdin
+ }
+
+ // Handle STDOUT/STDERR
+ go func() {
+ var err error
+ if isTerminal {
+ logrus.Debugf("Performing terminal HTTP attach for container %s", ctr.ID())
+ err = httpAttachTerminalCopy(conn, httpBuf, ctr.ID())
+ } else {
+ logrus.Debugf("Performing non-terminal HTTP attach for container %s", ctr.ID())
+ err = httpAttachNonTerminalCopy(conn, httpBuf, ctr.ID(), attachStdin, attachStdout, attachStderr)
+ }
+ errChan <- err
+ logrus.Debugf("STDOUT/ERR copy completed")
+ }()
+ // Next, STDIN. Avoid entirely if attachStdin unset.
+ if attachStdin {
+ go func() {
+ _, err := utils.CopyDetachable(conn, httpBuf, detach)
+ logrus.Debugf("STDIN copy completed")
+ errChan <- err
+ }()
+ }
+
+ if cancel != nil {
+ select {
+ case err := <-errChan:
+ return err
+ case <-cancel:
+ return nil
+ }
+ } else {
+ var connErr error = <-errChan
+ return connErr
+ }
+}
+
+// AttachResize resizes the terminal used by the given container.
+func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error {
+ // TODO: probably want a dedicated function to get ctl file path?
+ controlPath := filepath.Join(ctr.bundlePath(), "ctl")
+ controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
+ if err != nil {
+ return errors.Wrapf(err, "could not open ctl file for terminal resize")
+ }
+ defer controlFile.Close()
+
+ logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize)
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
+ return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ }
+
+ return nil
+}
+
// ExecContainer executes a command in a running container
// TODO: Split into Create/Start/Attach/Wait
func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
@@ -1476,3 +1604,139 @@ func (r *ConmonOCIRuntime) getOCIRuntimeVersion() (string, error) {
}
return strings.TrimSuffix(output, "\n"), nil
}
+
+// Copy data from container to HTTP connection, for terminal attach.
+// Container is the container's attach socket connection, http is a buffer for
+// the HTTP connection. cid is the ID of the container the attach session is
+// running for (used solely for error messages).
+func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string) error {
+ buf := make([]byte, bufferSize)
+ for {
+ numR, err := container.Read(buf)
+ if numR > 0 {
+ switch buf[0] {
+ case AttachPipeStdout:
+ // Do nothing
+ default:
+ logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR)
+ continue
+ }
+
+ numW, err2 := http.Write(buf[1:numR])
+ if err2 != nil {
+ if err != nil {
+ logrus.Errorf("Error reading container %s STDOUT: %v", cid, err)
+ }
+ return err2
+ } else if numW+1 != numR {
+ return io.ErrShortWrite
+ }
+ // We need to force the buffer to write immediately, so
+ // there isn't a delay on the terminal side.
+ if err2 := http.Flush(); err2 != nil {
+ if err != nil {
+ logrus.Errorf("Error reading container %s STDOUT: %v", cid, err)
+ }
+ return err2
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+}
+
+// Copy data from a container to an HTTP connection, for non-terminal attach.
+// Appends a header to multiplex input.
+func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string, stdin, stdout, stderr bool) error {
+ buf := make([]byte, bufferSize)
+ for {
+ numR, err := container.Read(buf)
+ if numR > 0 {
+ headerBuf := []byte{0, 0, 0, 0}
+
+ // Practically speaking, we could make this buf[0] - 1,
+ // but we need to validate it anyways...
+ switch buf[0] {
+ case AttachPipeStdin:
+ headerBuf[0] = 0
+ if !stdin {
+ continue
+ }
+ case AttachPipeStdout:
+ if !stdout {
+ continue
+ }
+ headerBuf[0] = 1
+ case AttachPipeStderr:
+ if !stderr {
+ continue
+ }
+ headerBuf[0] = 2
+ default:
+ logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR)
+ continue
+ }
+
+ // Get big-endian length and append.
+ // Subtract 1 because we strip the first byte (used for
+ // multiplexing by Conmon).
+ lenBuf := []byte{0, 0, 0, 0}
+ binary.BigEndian.PutUint32(lenBuf, uint32(numR-1))
+ headerBuf = append(headerBuf, lenBuf...)
+
+ numH, err2 := http.Write(headerBuf)
+ if err2 != nil {
+ if err != nil {
+ logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ }
+
+ return err2
+ }
+ // Hardcoding header length is pretty gross, but
+ // fast. Should be safe, as this is a fixed part
+ // of the protocol.
+ if numH != 8 {
+ if err != nil {
+ logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ }
+
+ return io.ErrShortWrite
+ }
+
+ numW, err2 := http.Write(buf[1:numR])
+ if err2 != nil {
+ if err != nil {
+ logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ }
+
+ return err2
+ } else if numW+1 != numR {
+ if err != nil {
+ logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ }
+
+ return io.ErrShortWrite
+ }
+ // We need to force the buffer to write immediately, so
+ // there isn't a delay on the terminal side.
+ if err2 := http.Flush(); err2 != nil {
+ if err != nil {
+ logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ }
+ return err2
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+}
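
On the receiving side, each frame written by httpAttachNonTerminalCopy is an 8-byte header (stream byte, three zero bytes, big-endian payload length) followed by the payload. A minimal sketch of a client-side reader for that framing (not part of this change):

    package main

    import (
        "encoding/binary"
        "io"
    )

    // readAttachFrame reads one multiplexed frame: header[0] is the stream
    // (0 stdin, 1 stdout, 2 stderr) and header[4:8] is the payload length.
    func readAttachFrame(r io.Reader) (stream byte, payload []byte, err error) {
        header := make([]byte, 8)
        if _, err := io.ReadFull(r, header); err != nil {
            return 0, nil, err
        }
        payload = make([]byte, binary.BigEndian.Uint32(header[4:8]))
        if _, err := io.ReadFull(r, payload); err != nil {
            return 0, nil, err
        }
        return header[0], payload, nil
    }
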
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 0faa1805b..ff7eea625 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -1,13 +1,16 @@
package libpod
import (
+ "bufio"
"fmt"
+ "net"
"path/filepath"
"sync"
"github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "k8s.io/client-go/tools/remotecommand"
)
var (
@@ -107,6 +110,16 @@ func (r *MissingRuntime) UnpauseContainer(ctr *Container) error {
return r.printError()
}
+// HTTPAttach is not available as the runtime is missing
+func (r *MissingRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error {
+ return r.printError()
+}
+
+// AttachResize is not available as the runtime is missing
+func (r *MissingRuntime) AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error {
+ return r.printError()
+}
+
// ExecContainer is not available as the runtime is missing
func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
return -1, nil, r.printError()
diff --git a/libpod/util.go b/libpod/util.go
index 30e5cd4c3..f79d6c09b 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -1,7 +1,9 @@
package libpod
import (
+ "bufio"
"fmt"
+ "io"
"os"
"os/exec"
"path/filepath"
@@ -16,6 +18,7 @@ import (
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// Runtime API constants
@@ -231,3 +234,20 @@ func checkDependencyContainer(depCtr, ctr *Container) error {
return nil
}
+
+// hijackWriteErrorAndClose writes an error to a hijacked HTTP session and
+// closes it. Intended for use by the HTTPAttach function.
+// If error is nil, it will not be written; we'll only close the connection.
+func hijackWriteErrorAndClose(toWrite error, cid string, httpCon io.Closer, httpBuf *bufio.ReadWriter) {
+ if toWrite != nil {
+ if _, err := httpBuf.Write([]byte(toWrite.Error())); err != nil {
+ logrus.Errorf("Error writing error %q to container %s HTTP attach connection: %v", toWrite, cid, err)
+ } else if err := httpBuf.Flush(); err != nil {
+ logrus.Errorf("Error flushing HTTP buffer for container %s HTTP attach connection: %v", cid, err)
+ }
+ }
+
+ if err := httpCon.Close(); err != nil {
+ logrus.Errorf("Error closing container %s HTTP attach connection: %v", cid, err)
+ }
+}
diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go
index da4670892..5774ebe72 100644
--- a/pkg/adapter/client.go
+++ b/pkg/adapter/client.go
@@ -57,7 +57,7 @@ func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) {
// last resort is to make a socket connection with the default varlink address for root user
} else {
logrus.Debug("creating a varlink address based default root address")
- remoteEndpoint, err = newSocketConnection(DefaultAddress)
+ remoteEndpoint, err = newSocketConnection(DefaultVarlinkAddress)
}
return
}
diff --git a/pkg/adapter/client_config.go b/pkg/adapter/client_config.go
index 3559b16e3..8187b03b1 100644
--- a/pkg/adapter/client_config.go
+++ b/pkg/adapter/client_config.go
@@ -1,7 +1,10 @@
package adapter
-// DefaultAddress is the default address of the varlink socket
-const DefaultAddress = "unix:/run/podman/io.podman"
+// DefaultAPIAddress is the default address of the REST socket
+const DefaultAPIAddress = "unix:/run/podman/podman.sock"
+
+// DefaultVarlinkAddress is the default address of the varlink socket
+const DefaultVarlinkAddress = "unix:/run/podman/io.podman"
// EndpointType declares the type of server connection
type EndpointType int
diff --git a/pkg/api/Makefile b/pkg/api/Makefile
index 5fb4e7da5..8a1556800 100644
--- a/pkg/api/Makefile
+++ b/pkg/api/Makefile
@@ -1,3 +1,7 @@
+export GO111MODULE=off
+
+SWAGGER_OUT ?= swagger.yaml
+
swagger:
- swagger generate spec -o swagger.yaml -w ./
+ swagger generate spec -o ${SWAGGER_OUT} -w ./
cat tags.yaml >> swagger.yaml
diff --git a/pkg/api/handlers/containers_attach.go b/pkg/api/handlers/containers_attach.go
new file mode 100644
index 000000000..eb306348b
--- /dev/null
+++ b/pkg/api/handlers/containers_attach.go
@@ -0,0 +1,159 @@
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/gorilla/mux"
+ "github.com/gorilla/schema"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+func AttachContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+
+ query := struct {
+ DetachKeys string `schema:"detachKeys"`
+ Logs bool `schema:"logs"`
+ Stream bool `schema:"stream"`
+ Stdin bool `schema:"stdin"`
+ Stdout bool `schema:"stdout"`
+ Stderr bool `schema:"stderr"`
+ }{}
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Error parsing parameters", http.StatusBadRequest, err)
+ return
+ }
+
+ muxVars := mux.Vars(r)
+
+ // Detach keys: explicitly set to "" is very different from unset
+ // TODO: Our format for parsing these may be different from Docker.
+ var detachKeys *string
+ if _, found := muxVars["detachKeys"]; found {
+ detachKeys = &query.DetachKeys
+ }
+
+ streams := new(libpod.HTTPAttachStreams)
+ streams.Stdout = true
+ streams.Stderr = true
+ streams.Stdin = true
+ useStreams := false
+ if _, found := muxVars["stdin"]; found {
+ streams.Stdin = query.Stdin
+ useStreams = true
+ }
+ if _, found := muxVars["stdout"]; found {
+ streams.Stdout = query.Stdout
+ useStreams = true
+ }
+ if _, found := muxVars["stderr"]; found {
+ streams.Stderr = query.Stderr
+ useStreams = true
+ }
+ if !useStreams {
+ streams = nil
+ }
+ if useStreams && !streams.Stdout && !streams.Stderr && !streams.Stdin {
+ utils.Error(w, "Parameter conflict", http.StatusBadRequest, errors.Errorf("at least one of stdin, stdout, stderr must be true"))
+ return
+ }
+
+ // TODO: Investigate supporting these.
+ // Logs replays container logs over the attach socket.
+ // Stream seems to break things up somehow? Not 100% clear.
+ if query.Logs {
+ utils.Error(w, "Unsupported parameter", http.StatusBadRequest, errors.Errorf("the logs parameter to attach is not presently supported"))
+ return
+ }
+ // We only support stream=true or unset
+ if _, found := muxVars["stream"]; found && !query.Stream {
+ utils.Error(w, "Unsupported parameter", http.StatusBadRequest, errors.Errorf("the stream parameter to attach is not presently supported"))
+ return
+ }
+
+ name := getName(r)
+ ctr, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+
+ state, err := ctr.State()
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if !(state == define.ContainerStateCreated || state == define.ContainerStateRunning) {
+ utils.InternalServerError(w, errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers"))
+ return
+ }
+
+ // Hijack the connection
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ utils.InternalServerError(w, errors.Errorf("unable to hijack connection"))
+ return
+ }
+
+ w.WriteHeader(http.StatusSwitchingProtocols)
+
+ connection, buffer, err := hijacker.Hijack()
+ if err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "error hijacking connection"))
+ return
+ }
+
+ logrus.Debugf("Hijack for attach of container %s successful", ctr.ID())
+
+ // Perform HTTP attach.
+ // HTTPAttach will handle everything about the connection from here on
+ // (including closing it and writing errors to it).
+ if err := ctr.HTTPAttach(connection, buffer, streams, detachKeys, nil); err != nil {
+ // We can't really do anything about errors anymore. HTTPAttach
+ // should be writing them to the connection.
+ logrus.Errorf("Error attaching to container %s: %v", ctr.ID(), err)
+ }
+
+ logrus.Debugf("Attach for container %s completed successfully", ctr.ID())
+}
+
+func ResizeContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+
+ query := struct {
+ Height uint16 `schema:"h"`
+ Width uint16 `schema:"w"`
+ }{}
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ // This is not a 400, despite the fact that it should be, for
+ // compatibility reasons.
+ utils.InternalServerError(w, errors.Wrapf(err, "error parsing query options"))
+ return
+ }
+
+ name := getName(r)
+ ctr, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+
+ newSize := remotecommand.TerminalSize{
+ Width: query.Width,
+ Height: query.Height,
+ }
+ if err := ctr.AttachResize(newSize); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ // This is not a 204, even though we write nothing, for compatibility
+ // reasons.
+ utils.WriteResponse(w, http.StatusOK, "")
+}
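
Because the handler hijacks the connection, an ordinary HTTP client never sees the stream; a test client has to keep reading the raw socket after the response headers. The sketch below is illustrative only: the socket path and version prefix in the request line are assumptions and may not match the running service.

    package main

    import (
        "bufio"
        "fmt"
        "net"
        "net/http"
    )

    func attachRaw(ctrID string) error {
        // Assumed socket path; adjust to wherever the service actually listens.
        conn, err := net.Dial("unix", "/run/podman/podman.sock")
        if err != nil {
            return err
        }
        defer conn.Close()

        // Assumed version prefix; adjust to whatever VersionedPath produces.
        fmt.Fprintf(conn, "POST /v1.24/libpod/containers/%s/attach?stdout=true&stderr=true HTTP/1.1\r\nHost: podman\r\n\r\n", ctrID)

        // Read the switching-protocols response headers; the multiplexed
        // attach stream then follows on the same connection via br.
        br := bufio.NewReader(conn)
        resp, err := http.ReadResponse(br, nil)
        if err != nil {
            return err
        }
        fmt.Println("status:", resp.Status)
        return nil
    }
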
diff --git a/pkg/api/handlers/images.go b/pkg/api/handlers/images.go
index d4cddbfb2..b4acdc312 100644
--- a/pkg/api/handlers/images.go
+++ b/pkg/api/handlers/images.go
@@ -74,8 +74,25 @@ func TagImage(w http.ResponseWriter, r *http.Request) {
}
func RemoveImage(w http.ResponseWriter, r *http.Request) {
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ query := struct {
+ noPrune bool
+ }{
+ // This is where you can override the golang default value for one of the fields
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ muxVars := mux.Vars(r)
+ if _, found := muxVars["noprune"]; found {
+ if query.noPrune {
+ utils.UnSupportedParameter("noprune")
+ }
+ }
name := mux.Vars(r)["name"]
newImage, err := runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go
index 388be24b6..e16a4ea1f 100644
--- a/pkg/api/handlers/libpod/containers.go
+++ b/pkg/api/handlers/libpod/containers.go
@@ -143,6 +143,22 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
}
+func UnmountContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ name := mux.Vars(r)["name"]
+ conn, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ // TODO: In the future it might be an improvement if libpod's unmount returned a
+ // "container not mounted" error so we can surface that to the endpoint user
+ if err := conn.Unmount(false); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusNoContent, "")
+
+}
func MountContainer(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := mux.Vars(r)["name"]
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index daaf9d018..14f8e8de7 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/util"
"github.com/gorilla/mux"
"github.com/gorilla/schema"
"github.com/pkg/errors"
@@ -384,18 +385,27 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
decoder = r.Context().Value("decoder").(*schema.Decoder)
+ signal = "SIGKILL"
)
query := struct {
- signal int `schema:"signal"`
+ signal string `schema:"signal"`
}{
// override any golang type defaults
}
-
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
return
}
+ muxVars := mux.Vars(r)
+ if _, found := muxVars["signal"]; found {
+ signal = query.signal
+ }
+
+ sig, err := util.ParseSignal(signal)
+ if err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "unable to parse signal value"))
+ return
+ }
name := mux.Vars(r)["name"]
pod, err := runtime.LookupPod(name)
if err != nil {
@@ -419,8 +429,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
utils.Error(w, msg, http.StatusConflict, errors.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
return
}
- // TODO How do we differentiate if a signal was sent vs accepting the pod/container default?
- _, err = pod.Kill(uint(query.signal))
+ _, err = pod.Kill(uint(sig))
if err != nil {
utils.Error(w, "Something went wrong", http.StatusInternalServerError, err)
return
diff --git a/pkg/api/handlers/swagger.go b/pkg/api/handlers/swagger.go
index c845c8195..0db4e19b6 100644
--- a/pkg/api/handlers/swagger.go
+++ b/pkg/api/handlers/swagger.go
@@ -30,9 +30,7 @@ type swagImageInspect struct {
// swagger:response DocsImageDeleteResponse
type swagImageDeleteResponse struct {
// in:body
- Body struct {
- image.ImageDeleteResponse
- }
+ Body []image.ImageDeleteResponse
}
// Search results
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index 3ec0742bd..b6f125c58 100644
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -86,3 +86,8 @@ func (e ErrorModel) Error() string {
func (e ErrorModel) Cause() error {
return errors.New(e.Because)
}
+
+// UnSupportedParameter logs a given param by its string name as not supported.
+func UnSupportedParameter(param string) {
+ log.Infof("API parameter %q: not supported", param)
+}
diff --git a/pkg/api/server/listener_api.go b/pkg/api/server/listener_api.go
new file mode 100644
index 000000000..4984216b8
--- /dev/null
+++ b/pkg/api/server/listener_api.go
@@ -0,0 +1,31 @@
+package server
+
+import (
+ "net"
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+// ListenUnix follows stdlib net.Listen() API, providing a unix listener for the given path
+// ListenUnix will delete and create files/directories as needed
+func ListenUnix(network string, path string) (net.Listener, error) {
+ // setup custom listener for API server
+ err := os.MkdirAll(filepath.Dir(path), 0770)
+ if err != nil {
+ return nil, errors.Wrapf(err, "api.ListenUnix() failed to create %s", filepath.Dir(path))
+ }
+ os.Remove(path)
+
+ listener, err := net.Listen(network, path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "api.ListenUnix() failed to create net.Listen(%s, %s)", network, path)
+ }
+
+ _, err = os.Stat(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "net.Listen(%s, %s) failed to report the failure to create socket", network, path)
+ }
+ return listener, nil
+}
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index b275fa4d1..833bb5197 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -428,6 +428,91 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// '500':
// "$ref": "#/responses/InternalError"
r.HandleFunc(VersionedPath("/containers/{name:..*}/wait"), APIHandler(s.Context, generic.WaitContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /containers/{nameOrID}/attach compat attach
+ // ---
+ // tags:
+ // - containers (compat)
+ // summary: Attach to a container
+ // description: Hijacks the connection to forward the container's standard streams to the client.
+ // parameters:
+ // - in: path
+ // name: nameOrID
+ // required: true
+ // description: the name or ID of the container
+ // - in: query
+ // name: detachKeys
+ // required: false
+ // type: string
+ // description: keys to use for detaching from the container
+ // - in: query
+ // name: logs
+ // required: false
+ // type: bool
+ // description: Not yet supported
+ // - in: query
+ // name: stream
+ // required: false
+ // type: bool
+ // default: true
+ // description: If passed, must be set to true; stream=false is not yet supported
+ // - in: query
+ // name: stdout
+ // required: false
+ // type: bool
+ // description: Attach to container STDOUT
+ // - in: query
+ // name: stderr
+ // required: false
+ // type: bool
+ // description: Attach to container STDERR
+ // - in: query
+ // name: stdin
+ // required: false
+ // type: bool
+ // description: Attach to container STDIN
+ // produces:
+ // - application/json
+ // responses:
+ // '101':
+ // description: No error, connection has been hijacked for transporting streams.
+ // '400':
+ // "$ref": "#/responses/BadParamError"
+ // '404':
+ // "$ref": "#/responses/NoSuchContainer"
+ // '500':
+ // "$ref": "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/containers/{name:..*}/attach"), APIHandler(s.Context, handlers.AttachContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /containers/{nameOrID}/resize compat resize
+ // ---
+ // tags:
+ // - containers (compat)
+ // summary: Resize a container's TTY
+ // description: Resize the terminal attached to a container (for use with Attach).
+ // parameters:
+ // - in: path
+ // name: nameOrID
+ // required: true
+ // description: the name or ID of the container
+ // - in: query
+ // name: h
+ // type: int
+ // required: false
+ // description: Height to set for the terminal, in characters
+ // - in: query
+ // name: w
+ // type: int
+ // required: false
+ // description: Width to set for the terminal, in characters
+ // produces:
+ // - application/json
+ // responses:
+ // '200':
+ // description: no error
+ // '404':
+ // "$ref": "#/responses/NoSuchContainer"
+ // '500':
+ // "$ref": "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/containers/{name:..*}/resize"), APIHandler(s.Context, handlers.ResizeContainer)).Methods(http.MethodPost)
/*
libpod endpoints
@@ -580,7 +665,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// '500':
// "$ref": "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/kill"), APIHandler(s.Context, libpod.KillContainer)).Methods(http.MethodGet)
- // swagger:operation GET /libpod/containers/{nameOrID}/mount libpod mountContainer
+ // swagger:operation POST /libpod/containers/{nameOrID}/mount libpod mountContainer
// ---
// tags:
// - containers
@@ -599,12 +684,33 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// schema:
// description: id
// type: string
- // example: 3c784de79b791b4ebd3ac55e511f97fedc042328499554937a3f8bfd9c1a2cb8
+ // example: /var/lib/containers/storage/overlay/f3f693bd88872a1e3193f4ebb925f4c282e8e73aadb8ab3e7492754dda3a02a4/merged
// '404':
// "$ref": "#/responses/NoSuchContainer"
// '500':
// "$ref": "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/mount"), APIHandler(s.Context, libpod.MountContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/{nameOrID}/unmount libpod unmountContainer
+ // ---
+ // tags:
+ // - containers
+ // summary: Unmount a container
+ // description: Unmount a container from the filesystem
+ // parameters:
+ // - in: path
+ // name: nameOrID
+ // required: true
+ // description: the name or ID of the container
+ // produces:
+ // - application/json
+ // responses:
+ // '204':
+ // description: no error
+ // '404':
+ // "$ref": "#/responses/NoSuchContainer"
+ // '500':
+ // "$ref": "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/unmount"), APIHandler(s.Context, libpod.UnmountContainer)).Methods(http.MethodPost)
r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/logs"), APIHandler(s.Context, libpod.LogsFromContainer)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/{nameOrID}/pause libpod libpodPauseContainer
// ---
@@ -703,11 +809,13 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// '500':
// "$ref": "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/stats"), APIHandler(s.Context, generic.StatsContainer)).Methods(http.MethodGet)
- // swagger:operation GET /libpod/containers/{nameOrID}/top containers topContainer
+ // swagger:operation GET /libpod/containers/{nameOrID}/top libpod libpodTopContainer
//
// List processes running inside a container. Note
//
// ---
+ // tags:
+ // - containers
// parameters:
// - in: path
// name: nameOrID
@@ -725,7 +833,6 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// - application/json
// responses:
// '200':
- // description: no error
// "ref": "#/responses/DockerTopResponse"
// '404':
// "$ref": "#/responses/NoSuchContainer"
@@ -823,5 +930,90 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// '500':
// "$ref": "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/stop"), APIHandler(s.Context, handlers.StopContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/{nameOrID}/attach libpod attach
+ // ---
+ // tags:
+ // - containers
+ // summary: Attach to a container
+ // description: Hijacks the connection to forward the container's standard streams to the client.
+ // parameters:
+ // - in: path
+ // name: nameOrID
+ // required: true
+ // description: the name or ID of the container
+ // - in: query
+ // name: detachKeys
+ // required: false
+ // type: string
+ // description: keys to use for detaching from the container
+ // - in: query
+ // name: logs
+ // required: false
+ // type: bool
+ // description: Not yet supported
+ // - in: query
+ // name: stream
+ // required: false
+ // type: bool
+ // default: true
+ // description: If passed, must be set to true; stream=false is not yet supported
+ // - in: query
+ // name: stdout
+ // required: false
+ // type: bool
+ // description: Attach to container STDOUT
+ // - in: query
+ // name: stderr
+ // required: false
+ // type: bool
+ // description: Attach to container STDERR
+ // - in: query
+ // name: stdin
+ // required: false
+ // type: bool
+ // description: Attach to container STDIN
+ // produces:
+ // - application/json
+ // responses:
+ // '101':
+ // description: No error, connection has been hijacked for transporting streams.
+ // '400':
+ // "$ref": "#/responses/BadParamError"
+ // '404':
+ // "$ref": "#/responses/NoSuchContainer"
+ // '500':
+ // "$ref": "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/attach"), APIHandler(s.Context, handlers.AttachContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/{nameOrID}/resize libpod resize
+ // ---
+ // tags:
+ // - containers
+ // summary: Resize a container's TTY
+ // description: Resize the terminal attached to a container (for use with Attach).
+ // parameters:
+ // - in: path
+ // name: nameOrID
+ // required: true
+ // description: the name or ID of the container
+ // - in: query
+ // name: h
+ // type: int
+ // required: false
+ // description: Height to set for the terminal, in characters
+ // - in: query
+ // name: w
+ // type: int
+ // required: false
+ // description: Width to set for the terminal, in characters
+ // produces:
+ // - application/json
+ // responses:
+ // '200':
+ // description: no error
+ // '404':
+ // "$ref": "#/responses/NoSuchContainer"
+ // '500':
+ // "$ref": "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/resize"), APIHandler(s.Context, handlers.ResizeContainer)).Methods(http.MethodPost)
return nil
}
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index cd42afe71..7f1bb4e5c 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -193,8 +193,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// responses:
// '200':
// $ref: "#/responses/DocsImageDeleteResponse"
- // '400':
- // $ref: '#/responses/BadParamError'
+ // '404':
+ // $ref: '#/responses/NoSuchImage'
// '409':
// $ref: '#/responses/ConflictError'
// '500':
@@ -506,11 +506,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// - application/json
// responses:
// '200':
- // schema:
- // items:
- // $ref: "#/responses/DocsIageDeleteResponse"
- // '400':
- // $ref: "#/responses/BadParamError"
+ // $ref: "#/responses/DocsImageDeleteResponse"
// '404':
// $ref: '#/responses/NoSuchImage'
// '409':
@@ -533,10 +529,12 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// name: format
// type: string
// description: format for exported image
+ // default: oci-archive
// - in: query
// name: compress
// type: bool
// description: use compression on image
+ // default: false
// produces:
// - application/json
// responses:
diff --git a/pkg/api/server/register_pods.go b/pkg/api/server/register_pods.go
index 5069326b6..4018cfbe8 100644
--- a/pkg/api/server/register_pods.go
+++ b/pkg/api/server/register_pods.go
@@ -121,8 +121,9 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// description: the name or ID of the pod
// - in: query
// name: signal
- // type: int
+ // type: string
// description: signal to be sent to pod
+ // default: SIGKILL
// responses:
// '204':
// description: no error
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 2bda5ad01..f3bae0345 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -1,6 +1,8 @@
-// Package serviceapi Provides a Container compatible interface.
+// Package serviceapi Provides a Container compatible interface (EXPERIMENTAL)
//
-// This documentation describes the HTTP LibPod interface
+// This documentation describes the HTTP LibPod interface. It is to be
+// considered experimental at this point. The endpoints, parameters, inputs,
+// and return values can all change.
//
// Schemes: http, https
// Host: podman.io
@@ -8,6 +10,10 @@
// Version: 0.0.1
// License: Apache-2.0 https://opensource.org/licenses/Apache-2.0
// Contact: Podman <podman@lists.podman.io> https://podman.io/community/
+// InfoExtensions:
+// x-logo:
+// - url: https://raw.githubusercontent.com/containers/libpod/master/logo/podman-logo.png
+// - altText: "Podman logo"
//
// Consumes:
// - application/json
@@ -48,9 +54,9 @@ import (
)
type APIServer struct {
- http.Server // Where the HTTP work happens
+ http.Server // The HTTP work happens here
*schema.Decoder // Decoder for Query parameters to structs
- context.Context // Context for graceful server shutdown
+ context.Context // Context to carry objects to handlers
*libpod.Runtime // Where the real work happens
net.Listener // mux for routing HTTP API calls to libpod routines
context.CancelFunc // Stop APIServer
@@ -58,14 +64,37 @@ type APIServer struct {
time.Duration // Duration of client access sliding window
}
-// NewServer will create and configure a new API HTTP server
+// Duration to wait for the next request; when exceeded, the server shuts down
+const (
+ DefaultServiceDuration = 300 * time.Second
+ UnlimitedServiceDuration = 0 * time.Second
+)
+
+// NewServer will create and configure a new API server with all defaults
func NewServer(runtime *libpod.Runtime) (*APIServer, error) {
- listeners, err := activation.Listeners()
- if err != nil {
- return nil, errors.Wrap(err, "Cannot retrieve file descriptors from systemd")
- }
- if len(listeners) != 1 {
- return nil, errors.Errorf("Wrong number of file descriptors from systemd for socket activation (%d != 1)", len(listeners))
+ return newServer(runtime, DefaultServiceDuration, nil)
+}
+
+// NewServerWithSettings will create and configure a new API server using provided settings
+func NewServerWithSettings(runtime *libpod.Runtime, duration time.Duration, listener *net.Listener) (*APIServer, error) {
+ return newServer(runtime, duration, listener)
+}
+
+func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Listener) (*APIServer, error) {
+ // If listener not provided try socket activation protocol
+ if listener == nil {
+ if _, found := os.LookupEnv("LISTEN_FDS"); !found {
+ return nil, errors.Errorf("Cannot create Server, no listener provided and socket activation protocol is not active.")
+ }
+
+ listeners, err := activation.Listeners()
+ if err != nil {
+ return nil, errors.Wrap(err, "Cannot retrieve file descriptors from systemd")
+ }
+ if len(listeners) != 1 {
+ return nil, errors.Errorf("Wrong number of file descriptors for socket activation protocol (%d != 1)", len(listeners))
+ }
+ listener = &listeners[0]
}
router := mux.NewRouter()
@@ -80,9 +109,9 @@ func NewServer(runtime *libpod.Runtime) (*APIServer, error) {
Decoder: schema.NewDecoder(),
Context: nil,
Runtime: runtime,
- Listener: listeners[0],
+ Listener: *listener,
CancelFunc: nil,
- Duration: 300 * time.Second,
+ Duration: duration,
}
server.Timer = time.AfterFunc(server.Duration, func() {
if err := server.Shutdown(); err != nil {
@@ -176,6 +205,11 @@ func (s *APIServer) Serve() error {
// Shutdown is a clean shutdown waiting on existing clients
func (s *APIServer) Shutdown() error {
+ // Duration == 0 flags no auto-shutdown of server
+ if s.Duration == 0 {
+ return nil
+ }
+
// We're still in the sliding service window
if s.Timer.Stop() {
s.Timer.Reset(s.Duration)
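
Taken together with ListenUnix above, a caller that does not rely on systemd socket activation could wire the server up roughly like this; a sketch only, assuming a libpod.Runtime is already in hand and using the default socket path:

    package main

    import (
        "github.com/containers/libpod/libpod"
        "github.com/containers/libpod/pkg/api/server"
    )

    // runAPI serves the REST API on an explicit unix socket with no idle
    // shutdown (UnlimitedServiceDuration disables the sliding-window timer).
    func runAPI(runtime *libpod.Runtime) error {
        listener, err := server.ListenUnix("unix", "/run/podman/podman.sock")
        if err != nil {
            return err
        }
        apiServer, err := server.NewServerWithSettings(runtime, server.UnlimitedServiceDuration, &listener)
        if err != nil {
            return err
        }
        defer apiServer.Shutdown() //nolint:errcheck
        return apiServer.Serve()
    }
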
diff --git a/pkg/bindings/containers.go b/pkg/bindings/containers.go
index 01f68f970..057580088 100644
--- a/pkg/bindings/containers.go
+++ b/pkg/bindings/containers.go
@@ -126,11 +126,11 @@ func (c Connection) ContainerExists(nameOrID string) (bool, error) {
return false, nil
}
-func (c Connection) StopContainer(nameOrID string, timeout int) error {
- // TODO we might need to distinguish whether a timeout is desired; a zero, the int
- // zero value is valid; what do folks want to do?
+func (c Connection) StopContainer(nameOrID string, timeout *int) error {
params := make(map[string]string)
- params["t"] = strconv.Itoa(timeout)
+ if timeout != nil {
+ params["t"] = strconv.Itoa(*timeout)
+ }
response, err := c.newRequest(http.MethodPost, fmt.Sprintf("/containers/%s/stop", nameOrID), nil, params)
if err != nil {
return err
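
With the pointer, callers can now distinguish "no timeout requested" from an explicit zero. A usage sketch, assuming the exported Connection type shown above and an illustrative container name:

    package main

    import "github.com/containers/libpod/pkg/bindings"

    // stopExamples shows both forms of the new StopContainer signature.
    func stopExamples(c bindings.Connection) error {
        // Let the server apply its default stop timeout:
        if err := c.StopContainer("mycontainer", nil); err != nil {
            return err
        }
        // Request an explicit ten-second timeout:
        timeout := 10
        return c.StopContainer("mycontainer", &timeout)
    }
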
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 193c788c0..041a161dc 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -266,6 +266,8 @@ static void __attribute__((constructor)) init()
char path[PATH_MAX];
const char *const suffix = "/libpod/pause.pid";
char *cwd = getcwd (NULL, 0);
+ char uid_fmt[16];
+ char gid_fmt[16];
if (cwd == NULL)
{
@@ -324,6 +326,13 @@ static void __attribute__((constructor)) init()
exit (EXIT_FAILURE);
}
+ sprintf (uid_fmt, "%d", uid);
+ sprintf (gid_fmt, "%d", gid);
+
+ setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1);
+ setenv ("_CONTAINERS_ROOTLESS_UID", uid_fmt, 1);
+ setenv ("_CONTAINERS_ROOTLESS_GID", gid_fmt, 1);
+
r = setns (fd, 0);
if (r < 0)
{
diff --git a/test/e2e/run_signal_test.go b/test/e2e/run_signal_test.go
index eee7c14fb..fbdd3acec 100644
--- a/test/e2e/run_signal_test.go
+++ b/test/e2e/run_signal_test.go
@@ -132,7 +132,7 @@ var _ = Describe("Podman run with --sig-proxy", func() {
Expect(killSession.ExitCode()).To(Equal(0))
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(137))
+ Expect(session.ExitCode()).ToNot(Equal(0))
ok, _ = session.GrepString("Received")
Expect(ok).To(BeFalse())
})
diff --git a/vendor/github.com/containers/conmon/LICENSE b/vendor/github.com/containers/conmon/LICENSE
new file mode 100644
index 000000000..28df615ce
--- /dev/null
+++ b/vendor/github.com/containers/conmon/LICENSE
@@ -0,0 +1,190 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2018-2019 github.com/containers authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/conmon/runner/config/config.go b/vendor/github.com/containers/conmon/runner/config/config.go
new file mode 100644
index 000000000..cb70e9325
--- /dev/null
+++ b/vendor/github.com/containers/conmon/runner/config/config.go
@@ -0,0 +1,19 @@
+package config
+
+const (
+ // BufSize is the size of buffers passed in to sockets
+ BufSize = 8192
+ // ConnSockBufSize is the size of the socket used for
+ // to attach to the container
+ ConnSockBufSize = 32768
+ // WinResizeEvent is the event code the caller program will
+ // send along the ctrl fd to signal conmon to resize
+ // the pty window
+ WinResizeEvent = 1
+ // ReopenLogsEvent is the event code the caller program will
+ // send along the ctrl fd to signal conmon to reopen the log files
+ ReopenLogsEvent = 2
+ // TimedOutMessage is the message sent back to the caller by conmon
+ // when a container times out
+ TimedOutMessage = "command timed out"
+)
diff --git a/vendor/github.com/containers/conmon/runner/config/config_unix.go b/vendor/github.com/containers/conmon/runner/config/config_unix.go
new file mode 100644
index 000000000..5caaca7cc
--- /dev/null
+++ b/vendor/github.com/containers/conmon/runner/config/config_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package config
+
+const (
+ ContainerAttachSocketDir = "/var/run/crio"
+)
diff --git a/vendor/github.com/containers/conmon/runner/config/config_windows.go b/vendor/github.com/containers/conmon/runner/config/config_windows.go
new file mode 100644
index 000000000..2161b9441
--- /dev/null
+++ b/vendor/github.com/containers/conmon/runner/config/config_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package config
+
+const (
+ ContainerAttachSocketDir = "C:\\crio\\run\\"
+)
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
new file mode 100644
index 000000000..114db5aec
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
@@ -0,0 +1,164 @@
+kind: CustomResourceDefinition
+apiVersion: apiextensions.k8s.io/v1beta1
+metadata:
+ name: clusteroperators.config.openshift.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.versions[?(@.name=="operator")].version
+ description: The version the operator is at.
+ name: Version
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Available")].status
+ description: Whether the operator is running and stable.
+ name: Available
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Progressing")].status
+ description: Whether the operator is processing changes.
+ name: Progressing
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Degraded")].status
+ description: Whether the operator is degraded.
+ name: Degraded
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
+ description: The time the operator's Available status last changed.
+ name: Since
+ type: date
+ group: config.openshift.io
+ names:
+ kind: ClusterOperator
+ listKind: ClusterOperatorList
+ plural: clusteroperators
+ singular: clusteroperator
+ shortNames:
+ - co
+ preserveUnknownFields: false
+ scope: Cluster
+ subresources:
+ status: {}
+ version: v1
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ validation:
+ openAPIV3Schema:
+ description: ClusterOperator is the Custom Resource object which holds the current
+ state of an operator. This object is used by operators to convey their state
+ to the rest of the cluster.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds configuration that could apply to any operator.
+ type: object
+ status:
+ description: status holds the information about the state of an operator. It
+ is consistent with status information across the Kubernetes ecosystem.
+ type: object
+ properties:
+ conditions:
+ description: conditions describes the state of the operator's managed
+ and monitored components.
+ type: array
+ items:
+ description: ClusterOperatorStatusCondition represents the state of
+ the operator's managed and monitored components.
+ type: object
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the time of the last update
+ to the current status property.
+ type: string
+ format: date-time
+ message:
+ description: message provides additional information about the
+ current condition. This is only to be consumed by humans.
+ type: string
+ reason:
+ description: reason is the CamelCase reason for the condition's
+ current status.
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: type specifies the aspect reported by this condition.
+ type: string
+ extension:
+ description: extension contains any additional status information specific
+ to the operator which owns this status object.
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ relatedObjects:
+ description: 'relatedObjects is a list of objects that are "interesting"
+ or related to this operator. Common uses are: 1. the detailed resource
+ driving the operator 2. operator namespaces 3. operand namespaces'
+ type: array
+ items:
+ description: ObjectReference contains enough information to let you
+ inspect or modify the referred object.
+ type: object
+ required:
+ - group
+ - name
+ - resource
+ properties:
+ group:
+ description: group of the referent.
+ type: string
+ name:
+ description: name of the referent.
+ type: string
+ namespace:
+ description: namespace of the referent.
+ type: string
+ resource:
+ description: resource of the referent.
+ type: string
+ versions:
+ description: versions is a slice of operator and operand version tuples. Operators
+ which manage multiple operands will have multiple operand entries
+ in the array. Available operators must report the version of the
+ operator itself with the name "operator". An operator reports a new
+ "operator" version when it has rolled out the new version to all of
+ its operands.
+ type: array
+ items:
+ type: object
+ required:
+ - name
+ - version
+ properties:
+ name:
+ description: name is the name of the particular operand this version
+ is for. It usually matches container images, not operators.
+ type: string
+ version:
+ description: version indicates which version of a particular operand
+ is currently being managed. It must always match the Available
+ operand. If 1.0.0 is Available, then this must indicate 1.0.0
+ even if the operator is trying to rollout 1.1.0
+ type: string
+ versions:
+ - name: v1
+ served: true
+ storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
new file mode 100644
index 000000000..ccde0db23
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
@@ -0,0 +1,328 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterversions.config.openshift.io
+spec:
+ group: config.openshift.io
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ scope: Cluster
+ subresources:
+ status: {}
+ names:
+ plural: clusterversions
+ singular: clusterversion
+ kind: ClusterVersion
+ preserveUnknownFields: false
+ additionalPrinterColumns:
+ - name: Version
+ type: string
+ JSONPath: .status.history[?(@.state=="Completed")].version
+ - name: Available
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Available")].status
+ - name: Progressing
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Progressing")].status
+ - name: Since
+ type: date
+ JSONPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
+ - name: Status
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Progressing")].message
+ validation:
+ openAPIV3Schema:
+ description: ClusterVersion is the configuration for the ClusterVersionOperator.
+ This is where parameters related to automatic updates can be set.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec is the desired state of the cluster version - the operator
+ will work to ensure that the desired version is applied to the cluster.
+ type: object
+ required:
+ - clusterID
+ properties:
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+ channel will be contain stable updates that are appropriate for production
+ clusters.
+ type: string
+ clusterID:
+ description: clusterID uniquely identifies this cluster. This is expected
+ to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ in hexadecimal values). This is a required field.
+ type: string
+ desiredUpdate:
+ description: "desiredUpdate is an optional field that indicates the
+ desired value of the cluster version. Setting this value will trigger
+ an upgrade (if the current version does not match the desired version).
+ The set of recommended update values is listed as part of available
+ updates in status, and setting values outside that range may cause
+ the upgrade to fail. You may specify the version field without setting
+ image if an update exists with that version in the availableUpdates
+ or history. \n If an upgrade fails the operator will halt and report
+ status about the failing component. Setting the desired update value
+ back to the previous version will cause a rollback to be attempted.
+ Not all rollbacks will succeed."
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on update.
+ This option should only be used when the authenticity of the provided
+ image has been verified out of band because the provided image
+ will run with full administrative access to the cluster. Do not
+ use this flag with images that comes from unknown or potentially
+ malicious sources. \n This flag does not override other forms
+ of consistency checking that are required before a new update
+ is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains the
+ update. When this field is part of spec, image is optional if
+ version is specified and the availableUpdates field contains a
+ matching version.
+ type: string
+ version:
+ description: version is a semantic versioning identifying the update
+ version. When this field is part of spec, version is optional
+ if image is specified.
+ type: string
+ overrides:
+ description: overrides is list of overides for components that are managed
+ by cluster version operator. Marking a component unmanaged will prevent
+ the operator from creating or updating the object.
+ type: array
+ items:
+ description: ComponentOverride allows overriding cluster version operator's
+ behavior for a component.
+ type: object
+ required:
+ - group
+ - kind
+ - name
+ - namespace
+ - unmanaged
+ properties:
+ group:
+ description: group identifies the API group that the kind is in.
+ type: string
+ kind:
+ description: kind indentifies which object to override.
+ type: string
+ name:
+ description: name is the component's name.
+ type: string
+ namespace:
+ description: namespace is the component's namespace. If the resource
+ is cluster scoped, the namespace should be empty.
+ type: string
+ unmanaged:
+ description: 'unmanaged controls if cluster version operator should
+ stop managing the resources in this cluster. Default: false'
+ type: boolean
+ upstream:
+ description: upstream may be used to specify the preferred update server.
+ By default it will use the appropriate update server for the cluster
+ and region.
+ type: string
+ status:
+ description: status contains information about the available updates and
+ any in-progress updates.
+ type: object
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ - versionHash
+ properties:
+ availableUpdates:
+ description: availableUpdates contains the list of updates that are
+ appropriate for this cluster. This list may be empty if no updates
+ are recommended, if the update service is unavailable, or if an invalid
+ channel has been specified.
+ type: array
+ items:
+ description: Update represents a release of the ClusterVersionOperator,
+ referenced by the Image member.
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on
+ update. This option should only be used when the authenticity
+ of the provided image has been verified out of band because
+ the provided image will run with full administrative access
+ to the cluster. Do not use this flag with images that come
+ from unknown or potentially malicious sources. \n This flag
+ does not override other forms of consistency checking that are
+ required before a new update is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ version:
+ description: version is a semantic version identifying the
+ update version. When this field is part of spec, version is
+ optional if image is specified.
+ type: string
+ nullable: true
+ conditions:
+ description: conditions provides information about the cluster version.
+ The condition "Available" is set to true if the desiredUpdate has
+ been reached. The condition "Progressing" is set to true if an update
+ is being applied. The condition "Degraded" is set to true if an update
+ is currently blocked by a temporary or permanent error. Conditions
+ are only valid for the current desiredUpdate when metadata.generation
+ is equal to status.generation.
+ type: array
+ items:
+ description: ClusterOperatorStatusCondition represents the state of
+ the operator's managed and monitored components.
+ type: object
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the time of the last update
+ to the current status property.
+ type: string
+ format: date-time
+ message:
+ description: message provides additional information about the
+ current condition. This is only to be consumed by humans.
+ type: string
+ reason:
+ description: reason is the CamelCase reason for the condition's
+ current status.
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: type specifies the aspect reported by this condition.
+ type: string
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired will
+ be set with the information available, which may be an image or a
+ tag.
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on update.
+ This option should only be used when the authenticity of the provided
+ image has been verified out of band because the provided image
+ will run with full administrative access to the cluster. Do not
+ use this flag with images that come from unknown or potentially
+ malicious sources. \n This flag does not override other forms
+ of consistency checking that are required before a new update
+ is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains the
+ update. When this field is part of spec, image is optional if
+ version is specified and the availableUpdates field contains a
+ matching version.
+ type: string
+ version:
+ description: version is a semantic version identifying the update
+ version. When this field is part of spec, version is optional
+ if image is specified.
+ type: string
+ history:
+ description: history contains a list of the most recent versions applied
+ to the cluster. This value may be empty during cluster startup, and
+ then will be updated when a new update is being applied. The newest
+ update is first in the list and it is ordered by recency. Updates
+ in the history have state Completed if the rollout completed - if
+ an update was failing or halfway applied the state will be Partial.
+ Only a limited amount of update history is preserved.
+ type: array
+ items:
+ description: UpdateHistory is a single attempted update to the cluster.
+ type: object
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ properties:
+ completionTime:
+ description: completionTime, if set, is when the update was fully
+ applied. The update that is currently being applied will have
+ a null completion time. Completion time will always be set for
+ entries that are not the current update (usually to the started
+ time of the next update).
+ type: string
+ format: date-time
+ nullable: true
+ image:
+ description: image is a container image location that contains
+ the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update was started.
+ type: string
+ format: date-time
+ state:
+ description: state reflects whether the update was fully applied.
+ The Partial state indicates the update is not fully applied,
+ while the Completed state indicates the update was successfully
+ rolled out at least once (all parts of the update successfully
+ applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update was
+ properly verified before it was installed. If this is false
+ the cluster may not be trusted.
+ type: boolean
+ version:
+ description: version is a semantic version identifying the
+ update version. If the requested image does not define a version,
+ or if a failure occurs retrieving the image, this value may
+ be empty.
+ type: string
+ observedGeneration:
+ description: observedGeneration reports which version of the spec is
+ being synced. If this value is not equal to metadata.generation, then
+ the desired and conditions fields may represent a previous version.
+ type: integer
+ format: int64
+ versionHash:
+ description: versionHash is a fingerprint of the content that the cluster
+ will be updated with. It is used by the operator to avoid unnecessary
+ work and is for internal use only.
+ type: string
+ versions:
+ - name: v1
+ served: true
+ storage: true
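
Read together, spec.clusterID, spec.channel and spec.desiredUpdate are what an administrator actually sets when requesting an update. A minimal ClusterVersion manifest conforming to the schema above might look like the sketch below; the resource name, UUID, channel and version strings are illustrative placeholders, not values taken from this change.

apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
  name: version                                      # hypothetical singleton name
spec:
  clusterID: 11111111-2222-3333-4444-555555555555    # placeholder RFC4122 UUID
  channel: stable-4.3                                # placeholder channel
  desiredUpdate:
    version: 4.3.1                                   # should appear in availableUpdates or history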
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
new file mode 100644
index 000000000..8c857d45a
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
@@ -0,0 +1,101 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: operatorhubs.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: OperatorHub
+ listKind: OperatorHubList
+ plural: operatorhubs
+ singular: operatorhub
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ version: v1
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: OperatorHub is the Schema for the operatorhubs API. It can be used
+ to change the state of the default hub sources for OperatorHub on the cluster
+ from enabled to disabled and vice versa.
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OperatorHubSpec defines the desired state of OperatorHub
+ type: object
+ properties:
+ disableAllDefaultSources:
+ description: disableAllDefaultSources allows you to disable all the
+ default hub sources. If this is true, a specific entry in sources
+ can be used to enable a default source. If this is false, a specific
+ entry in sources can be used to disable or enable a default source.
+ type: boolean
+ sources:
+ description: sources is the list of default hub sources and their configuration.
+ If the list is empty, it implies that the default hub sources are
+ enabled on the cluster unless disableAllDefaultSources is true. If
+ disableAllDefaultSources is true and sources is not empty, the configuration
+ present in sources will take precedence. The list of default hub sources
+ and their current state will always be reflected in the status block.
+ type: array
+ items:
+ description: HubSource is used to specify the hub source and its configuration
+ type: object
+ properties:
+ disabled:
+ description: disabled is used to disable a default hub source
+ on cluster
+ type: boolean
+ name:
+ description: name is the name of one of the default hub sources
+ type: string
+ maxLength: 253
+ minLength: 1
+ status:
+ description: OperatorHubStatus defines the observed state of OperatorHub.
+ The current state of the default hub sources will always be reflected
+ here.
+ type: object
+ properties:
+ sources:
+ description: sources encapsulates the result of applying the configuration
+ for each hub source
+ type: array
+ items:
+ description: HubSourceStatus is used to reflect the current state
+ of applying the configuration to a default source
+ type: object
+ properties:
+ disabled:
+ description: disabled is used to disable a default hub source
+ on cluster
+ type: boolean
+ message:
+ description: message provides more information regarding failures
+ type: string
+ name:
+ description: name is the name of one of the default hub sources
+ type: string
+ maxLength: 253
+ minLength: 1
+ status:
+ description: status indicates success or failure in applying the
+ configuration
+ type: string
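
As the spec fields above suggest, disableAllDefaultSources and per-source entries can be combined. A sketch of an OperatorHub manifest that disables all default hub sources and then re-enables a single one; the resource and source names are illustrative placeholders.

apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
  name: cluster                  # hypothetical resource name
spec:
  disableAllDefaultSources: true
  sources:
  - name: example-operators      # placeholder default-source name
    disabled: false              # re-enable this one source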
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
new file mode 100644
index 000000000..afd076747
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
@@ -0,0 +1,98 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: proxies.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ names:
+ kind: Proxy
+ listKind: ProxyList
+ plural: proxies
+ singular: proxy
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Proxy holds cluster-wide information on how to configure default
+ proxies for the cluster. The canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec holds user-settable values for the proxy configuration
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames and/or CIDRs
+ for which the proxy should not be used. Empty means unset and will
+ not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used to verify
+ readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing a CA
+ certificate bundle used for client egress HTTPS connections. The certificate
+ bundle must be from the CA that signed the proxy's certificate and
+ be signed for everything. The trustedCA field should only be consumed
+ by a proxy validator. The validator is responsible for reading the
+ certificate bundle from required key \"ca-bundle.crt\" and copying
+ it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in yaml): \n
+ apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace:
+ openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE-----
+ \ Custom CA certificate bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames and/or CIDRs
+ for which the proxy should not be used.
+ type: string
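
A Proxy manifest following this schema, using the canonical name `cluster` from the description above; the proxy URLs, noProxy entries and ConfigMap name are placeholders.

apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
  name: cluster
spec:
  httpProxy: http://proxy.example.com:3128     # placeholder
  httpsProxy: http://proxy.example.com:3128    # placeholder
  noProxy: .cluster.local,10.0.0.0/16          # placeholder hostnames/CIDRs
  trustedCA:
    name: user-ca-bundle                       # ConfigMap in openshift-config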
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
new file mode 100644
index 000000000..4e1fdac37
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
@@ -0,0 +1,219 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: apiservers.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: APIServer
+ singular: apiserver
+ plural: apiservers
+ listKind: APIServerList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: APIServer holds configuration (like serving certificates, client
+ CA and CORS domains) shared by all API servers in the system, among them especially
+ kube-apiserver and openshift-apiserver. The canonical name of an instance
+ is 'cluster'.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional, user-defined
+ regular expressions describing hosts for which the API server allows
+ access using the CORS headers. This may be needed to access the API
+ and the integrated OAuth server from JavaScript applications. The
+ values are regular expressions that correspond to the Golang regular
+ expression language.
+ type: array
+ items:
+ type: string
+ clientCA:
+ description: 'clientCA references a ConfigMap containing a certificate
+ bundle for the signers that will be recognized for incoming client
+ certificates in addition to the operator managed signers. If this
+ is empty, then only operator managed signers are valid. You usually
+ only have to set this if you have your own PKI you wish to honor client
+ certificates from. The ConfigMap must exist in the openshift-config
+ namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
+ - CA bundle.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ encryption:
+ description: encryption allows the configuration of encryption of resources
+ at the datastore layer.
+ type: object
+ properties:
+ type:
+ description: "type defines what encryption type should be used to
+ encrypt resources at the datastore layer. When this field is unset
+ (i.e. when it is set to the empty string), identity is implied.
+ The behavior of unset can and will change over time. Even if
+ encryption is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in best practices.
+ \n When encryption is enabled, all sensitive resources shipped
+ with the platform are encrypted. This list of sensitive resources
+ can and will change over time. The current authoritative list
+ is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
+ \ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
+ type: string
+ enum:
+ - ""
+ - identity
+ - aescbc
+ servingCerts:
+ description: servingCert is the TLS cert info for serving secure traffic.
+ If not specified, operator managed certificates will be used for serving
+ secure traffic.
+ type: object
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing the
+ TLS cert info for serving secure traffic to specific hostnames.
+ If no named certificates are provided, or no named certificates
+ match the server name as understood by a client, the defaultServingCertificate
+ will be used.
+ type: array
+ items:
+ description: APIServerNamedServingCert maps a server DNS name,
+ as understood by a client, to a certificate.
+ type: object
+ properties:
+ names:
+ description: names is an optional list of explicit DNS names
+ (leading wildcards allowed) that should use this certificate
+ to serve secure traffic. If no names are provided, the implicit
+ names will be extracted from the certificates. Exact names
+ trump over wildcard names. Explicit names defined here trump
+ over extracted implicit names.
+ type: array
+ items:
+ type: string
+ servingCertificate:
+ description: 'servingCertificate references a kubernetes.io/tls
+ type secret containing the TLS cert info for serving secure
+ traffic. The secret must exist in the openshift-config namespace
+ and contain the following required fields: - Secret.Data["tls.key"]
+ - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for TLS connections
+ for externally exposed servers. \n If unset, a default (which may
+ change between releases) is chosen. Note that only Old and Intermediate
+ profiles are currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ type: object
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security profile. Be
+ extremely careful using a custom profile as invalid configurations
+ can be catastrophic. An example custom profile looks like this:
+ \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
+ \ minTLSVersion: TLSv1.1"
+ type: object
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher algorithms
+ that are negotiated during the TLS handshake. Operators may
+ remove entries their operands do not support. For example,
+ to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
+ type: array
+ items:
+ type: string
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the minimal version
+ of the TLS protocol that is negotiated during the TLS handshake.
+ For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):
+ \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest
+ minTLSVersion allowed is VersionTLS12"
+ type: string
+ nullable: true
+ intermediate:
+ description: "intermediate is a TLS security profile based on: \n
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ \ minTLSVersion: TLSv1.2"
+ type: object
+ nullable: true
+ modern:
+ description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ type: object
+ nullable: true
+ old:
+ description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
+ ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
+ \ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA -
+ DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256
+ \ - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256
+ \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion:
+ TLSv1.0"
+ type: object
+ nullable: true
+ type:
+ description: "type is one of Old, Intermediate, Modern or Custom.
+ Custom provides the ability to specify individual TLS security
+ profile parameters. Old, Intermediate and Modern are TLS security
+ profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change over time
+ as new ciphers are developed and existing ciphers are found to
+ be insecure. Depending on precisely which ciphers are available
+ to a process, the list may be reduced. \n Note that the Modern
+ profile is currently not supported because it is not yet well
+ adopted by common software libraries."
+ type: string
+ status:
+ type: object
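
An APIServer manifest exercising additionalCORSAllowedOrigins and tlsSecurityProfile, using the canonical name 'cluster' from the description; the origin regex is a placeholder, and only the Old and Intermediate profiles are described as supported above.

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  additionalCORSAllowedOrigins:
  - (?i)//console\.example\.com(:|\z)   # placeholder Golang regex
  tlsSecurityProfile:
    type: Intermediate
    intermediate: {}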
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
new file mode 100644
index 000000000..f21ac7ea8
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
@@ -0,0 +1,123 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: authentications.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Authentication
+ listKind: AuthenticationList
+ plural: authentications
+ singular: authentication
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Authentication specifies cluster-wide settings for authentication
+ (like OAuth and webhook token authenticators). The canonical name of an instance
+ is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint data for
+ OAuth 2.0 Authorization Server Metadata for an external OAuth server.
+ This discovery document can be viewed from its served location: oc
+ get --raw ''/.well-known/oauth-authorization-server'' For further
+ details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence over
+ any metadata reference stored in status. The key "oauthMetadata" is
+ used to locate the data. If specified and the config map or expected
+ key is not found, no metadata is served. If the specified metadata
+ is not valid, no metadata is served. The namespace for this config
+ map is openshift-config.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing authentication
+ mode in use. Specifically, it manages the component that responds
+ to login attempts. The default is IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators configures remote token reviewers.
+ These remote authentication webhooks can be used to verify bearer
+ tokens via the tokenreviews.authentication.k8s.io REST API. This
+ is required to honor bearer tokens that are provisioned by an external
+ authentication service. The namespace for these secrets is openshift-config.
+ type: array
+ items:
+ description: webhookTokenAuthenticator holds the necessary configuration
+ options for a remote token authenticator
+ type: object
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file data which
+ describes how to access the remote webhook service. For further
+ details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data. If the secret
+ or expected key is not found, the webhook is not honored. If
+ the specified kube config data is not valid, the webhook is
+ not honored. The namespace for this secret is determined by
+ the point of use.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ integratedOAuthMetadata:
+ description: 'integratedOAuthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for the in-cluster
+ integrated OAuth server. This discovery document can be viewed from
+ its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ This contains the observed value based on cluster state. An explicitly
+ set value in spec.oauthMetadata has precedence over this field. This
+ field has no meaning if authentication spec.type is not set to IntegratedOAuth.
+ The key "oauthMetadata" is used to locate the data. If the config
+ map or expected key is not found, no metadata is served. If the specified
+ metadata is not valid, no metadata is served. The namespace for this
+ config map is openshift-config-managed.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
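
An Authentication manifest for the canonical `cluster` instance that keeps the default IntegratedOAuth type and adds a remote token reviewer; the secret name is a placeholder referencing a kubeconfig secret as described above.

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: IntegratedOAuth
  webhookTokenAuthenticators:
  - kubeConfig:
      name: external-authn-kubeconfig   # placeholder secret name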
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
new file mode 100644
index 000000000..8f7583971
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
@@ -0,0 +1,366 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: builds.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Build
+ singular: build
+ plural: builds
+ listKind: BuildList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: "Build configures the behavior of OpenShift builds for the entire
+ cluster. This includes default settings that can be overridden in BuildConfig
+ objects, and overrides which are applied to all builds. \n The canonical name
+ is \"cluster\""
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec holds user-settable values for the build controller configuration
+ type: object
+ properties:
+ additionalTrustedCA:
+ description: "AdditionalTrustedCA is a reference to a ConfigMap containing
+ additional CAs that should be trusted for image pushes and pulls during
+ builds. The namespace for this config map is openshift-config. \n
+ DEPRECATED: Additional CAs for image pull and push should be set on
+ image.config.openshift.io/cluster instead."
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ buildDefaults:
+ description: BuildDefaults controls the default information for Builds
+ type: object
+ properties:
+ defaultProxy:
+ description: "DefaultProxy contains the default proxy settings for
+ all build operations, including image pull/push and source download.
+ \n Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`,
+ and `NO_PROXY` environment variables in the build config's strategy."
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs for which the proxy should not be used. Empty
+ means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle used for client egress HTTPS connections.
+ The certificate bundle must be from the CA that signed the
+ proxy's certificate and be signed for everything. The trustedCA
+ field should only be consumed by a proxy validator. The validator
+ is responsible for reading the certificate bundle from required
+ key \"ca-bundle.crt\" and copying it to a ConfigMap named
+ \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in
+ yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
+ user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
+ | -----BEGIN CERTIFICATE----- Custom CA certificate
+ bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ env:
+ description: Env is a set of default environment variables that
+ will be applied to the build if the specified variables do not
+ exist on the build
+ type: array
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ type: object
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ type: object
+ required:
+ - key
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, metadata.labels, metadata.annotations,
+ spec.nodeName, spec.serviceAccountName, status.hostIP,
+ status.podIP, status.podIPs.'
+ type: object
+ required:
+ - fieldPath
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ type: object
+ required:
+ - resource
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ type: string
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ type: object
+ required:
+ - key
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ gitProxy:
+ description: "GitProxy contains the proxy settings for git operations
+ only. If set, this will override any Proxy settings for all git
+ commands, such as git clone. \n Values that are not set here will
+ be inherited from DefaultProxy."
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs for which the proxy should not be used. Empty
+ means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle used for client egress HTTPS connections.
+ The certificate bundle must be from the CA that signed the
+ proxy's certificate and be signed for everything. The trustedCA
+ field should only be consumed by a proxy validator. The validator
+ is responsible for reading the certificate bundle from required
+ key \"ca-bundle.crt\" and copying it to a ConfigMap named
+ \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in
+ yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
+ user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
+ | -----BEGIN CERTIFICATE----- Custom CA certificate
+ bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ imageLabels:
+ description: ImageLabels is a list of docker labels that are applied
+ to the resulting image. User can override a default label by providing
+ a label with the same name in their Build/BuildConfig.
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ description: Name defines the name of the label. It must have
+ non-zero length.
+ type: string
+ value:
+ description: Value defines the literal value of the label.
+ type: string
+ resources:
+ description: Resources defines resource requirements to execute
+ the build.
+ type: object
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ additionalProperties:
+ type: string
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ additionalProperties:
+ type: string
+ buildOverrides:
+ description: BuildOverrides controls override settings for builds
+ type: object
+ properties:
+ imageLabels:
+ description: ImageLabels is a list of docker labels that are applied
+ to the resulting image. If user provided a label in their Build/BuildConfig
+ with the same name as one in this list, the user's label will
+ be overwritten.
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ description: Name defines the name of the label. It must have
+ non-zero length.
+ type: string
+ value:
+ description: Value defines the literal value of the label.
+ type: string
+ nodeSelector:
+ description: NodeSelector is a selector which must be true for the
+ build pod to fit on a node
+ type: object
+ additionalProperties:
+ type: string
+ tolerations:
+ description: Tolerations is a list of Tolerations that will override
+ any existing tolerations set on a build pod.
+ type: array
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
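
A Build manifest for the canonical "cluster" resource showing buildDefaults and buildOverrides side by side; the proxy URL, env var, label and node-selector values are placeholders.

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128   # placeholder
      noProxy: .cluster.local                    # placeholder
    env:
    - name: GIT_SSL_NO_VERIFY                    # placeholder env var
      value: "false"
    imageLabels:
    - name: vendor                               # placeholder label
      value: example.com
  buildOverrides:
    nodeSelector:
      node-role.kubernetes.io/worker: ""         # placeholder selector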
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
new file mode 100644
index 000000000..b527f7aa3
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
@@ -0,0 +1,70 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: consoles.config.openshift.io
+spec:
+ scope: Cluster
+ preserveUnknownFields: false
+ group: config.openshift.io
+ names:
+ kind: Console
+ listKind: ConsoleList
+ plural: consoles
+ singular: console
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Console holds cluster-wide configuration for the web console, including
+ the logout URL, and reports the public URL of the console. The canonical name
+ is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ authentication:
+ description: ConsoleAuthentication defines a list of optional configuration
+ for console authentication.
+ type: object
+ properties:
+ logoutRedirect:
+ description: 'An optional, absolute URL to redirect web browsers
+ to after logging out of the console. If not specified, it will
+ redirect to the default login page. This is required when using
+ an identity provider that supports single sign-on (SSO) such as:
+ - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML)
+ - OAuth (GitHub, GitLab, Google) Logging out of the console will
+ destroy the user''s token. The logoutRedirect provides the user
+ the option to perform single logout (SLO) through the identity
+ provider to destroy their single sign-on session.'
+ type: string
+ pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ consoleURL:
+ description: The URL for the console. This will be derived from the
+ host for the route that is created for the console.
+ type: string
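
A Console manifest for the canonical `cluster` resource; the logout URL is a placeholder and must satisfy the https pattern given above.

apiVersion: config.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  authentication:
    logoutRedirect: https://idp.example.com/logout   # placeholder SLO endpoint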
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
new file mode 100644
index 000000000..c883ee0f0
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
@@ -0,0 +1,100 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: dnses.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: DNS
+ listKind: DNSList
+ plural: dnses
+ singular: dns
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: DNS holds cluster-wide information about DNS. The canonical name
+ is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ baseDomain:
+ description: "baseDomain is the base domain of the cluster. All managed
+ DNS records will be sub-domains of this base. \n For example, given
+ the base domain `openshift.example.com`, an API server DNS record
+ may be created for `cluster-api.openshift.example.com`. \n Once set,
+ this field cannot be changed."
+ type: string
+ privateZone:
+ description: "privateZone is the location where all the DNS records
+ that are only available internally to the cluster exist. \n If this
+ field is nil, no private records should be created. \n Once set, this
+ field cannot be changed."
+ type: object
+ properties:
+ id:
+ description: "id is the identifier that can be used to find the
+ DNS hosted zone. \n on AWS zone can be fetched using `ID` as id
+ in [1] on Azure zone can be fetched using `ID` as a pre-determined
+ name in [2], on GCP zone can be fetched using `ID` as a pre-determined
+ name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
+ type: string
+ tags:
+ description: "tags can be used to query the DNS hosted zone. \n
+ on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
+ using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
+ type: object
+ additionalProperties:
+ type: string
+ publicZone:
+ description: "publicZone is the location where all the DNS records that
+ are publicly accessible to the internet exist. \n If this field is
+ nil, no public records should be created. \n Once set, this field
+ cannot be changed."
+ type: object
+ properties:
+ id:
+ description: "id is the identifier that can be used to find the
+ DNS hosted zone. \n on AWS zone can be fetched using `ID` as id
+ in [1] on Azure zone can be fetched using `ID` as a pre-determined
+ name in [2], on GCP zone can be fetched using `ID` as a pre-determined
+ name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
+ type: string
+ tags:
+ description: "tags can be used to query the DNS hosted zone. \n
+ on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
+ using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
+ type: object
+ additionalProperties:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
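
A DNS manifest for the canonical `cluster` resource; the base domain, zone ID and tags are placeholders, with publicZone using an ID lookup and privateZone using tag filters as described above.

apiVersion: config.openshift.io/v1
kind: DNS
metadata:
  name: cluster
spec:
  baseDomain: openshift.example.com         # placeholder base domain
  publicZone:
    id: Z3URY6TWQ91KVV                      # placeholder hosted-zone ID
  privateZone:
    tags:
      kubernetes.io/cluster/example: owned  # placeholder tag filter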
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
new file mode 100644
index 000000000..89084a33f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
@@ -0,0 +1,76 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: featuregates.config.openshift.io
+spec:
+ group: config.openshift.io
+ version: v1
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: FeatureGate
+ singular: featuregate
+ plural: featuregates
+ listKind: FeatureGateList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Feature holds cluster-wide information about feature gates. The
+ canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling of any
+ feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE,
+ and PREVENTS UPGRADES. Because of its nature, this setting cannot
+ be validated. If you have any typos or accidentally apply invalid
+ combinations your cluster may fail in an unrecoverable way. featureSet
+ must equal "CustomNoUpgrade" must be set to use this field.
+ type: object
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates that you want
+ to force off
+ type: array
+ items:
+ type: string
+ enabled:
+ description: enabled is a list of all feature gates that you want
+ to force on
+ type: array
+ items:
+ type: string
+ nullable: true
+ featureSet:
+ description: featureSet changes the list of features in the cluster. The
+ default is empty. Be very careful adjusting this setting. Turning
+ on or off features may cause irreversible changes in your cluster
+ which cannot be undone.
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
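
A FeatureGate manifest for the canonical `cluster` resource using the CustomNoUpgrade path described above; the gate names are invented for illustration, and as the schema warns this feature set cannot be undone and prevents upgrades.

apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: CustomNoUpgrade
  customNoUpgrade:
    enabled:
    - ExampleAlphaFeature     # hypothetical gate name
    disabled:
    - ExampleBetaFeature      # hypothetical gate name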
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
new file mode 100644
index 000000000..a0fd48709
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
@@ -0,0 +1,144 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: images.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Image
+ singular: image
+ plural: images
+ listKind: ImageList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Image governs policies related to imagestream imports and runtime
+ configuration for external registries. It allows cluster admins to configure
+ which registries OpenShift is allowed to import images from, extra CA trust
+ bundles for external registries, and policies to blacklist/whitelist registry
+ hostnames. When exposing OpenShift's image registry to the public, this also
+ lets cluster admins specify the external hostname.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap containing
+ additional CAs that should be trusted during imagestream import, pod
+ image pull, build image pull, and imageregistry pullthrough. The namespace
+ for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container image registries
+ that normal users may import images from. Set this list to the registries
+ that you trust to contain valid Docker images and that you want applications
+ to be able to import from. Users with permission to create Images
+ or ImageStreamMappings via the API are not affected by this policy
+ - typically only administrators or system integrations will have those
+ permissions.
+ type: array
+ items:
+ description: RegistryLocation contains a location of the registry
+ specified by the registry domain name. The domain name might include
+ wildcards, like '*' or '??'.
+ type: object
+ properties:
+ domainName:
+ description: domainName specifies a domain name for the registry.
+ If the registry uses a non-standard port (one other than 80
+ or 443), the port should be included in the domain name as well.
+ type: string
+ insecure:
+ description: insecure indicates whether the registry is secure
+ (https) or insecure (http). By default (if not specified) the
+ registry is assumed to be secure.
+ type: boolean
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames for the
+ default external image registry. The external hostname should be set
+ only when the image registry is exposed externally. The first value
+ is used in 'publicDockerImageRepository' field in ImageStreams. The
+ value must be in "hostname[:port]" format.
+ type: array
+ items:
+ type: string
+ registrySources:
+ description: registrySources contains configuration that determines
+ how the container runtime should treat individual registries when
+ accessing images for builds+pods. (e.g. whether or not to allow insecure
+ access). It does not contain configuration for the internal cluster
+ registry.
+ type: object
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are whitelisted for image pull/push.
+ All other registries are blocked. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ type: array
+ items:
+ type: string
+ blockedRegistries:
+ description: "blockedRegistries are blacklisted from image pull/push.
+ All other registries are allowed. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ type: array
+ items:
+ type: string
+ insecureRegistries:
+ description: insecureRegistries are registries which do not have
+ a valid TLS certificate or only support HTTP connections.
+ type: array
+ items:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames for the
+ default external image registry. The external hostname should be set
+ only when the image registry is exposed externally. The first value
+ is used in 'publicDockerImageRepository' field in ImageStreams. The
+ value must be in "hostname[:port]" format.
+ type: array
+ items:
+ type: string
+ internalRegistryHostname:
+ description: internalRegistryHostname sets the hostname for the default
+ internal image registry. The value must be in "hostname[:port]" format.
+ This value is set by the image registry operator which controls the
+ internal registry hostname. For backward compatibility, users can
+ still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this
+ setting overrides the environment variable.
+ type: string
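As a rough, non-authoritative sketch, an Image config exercising the spec fields above could look like the following; the registry hostnames and the ConfigMap name are hypothetical, and the singleton name cluster follows the convention stated for the other config resources in this diff.

apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
spec:
  # extra CAs trusted for imagestream import and image pulls; the ConfigMap
  # must live in the openshift-config namespace
  additionalTrustedCA:
    name: registry-ca                        # hypothetical ConfigMap name
  # registries normal users may import images from
  allowedRegistriesForImport:
  - domainName: registry.example.com:5000    # non-standard port kept in the name
    insecure: false
  # hostnames published in the ImageStream publicDockerImageRepository field
  externalRegistryHostnames:
  - registry.apps.example.com
  registrySources:
    insecureRegistries:
    - insecure-registry.example.com          # HTTP-only or invalid-TLS registry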
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
new file mode 100644
index 000000000..2aba542da
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
@@ -0,0 +1,221 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: infrastructures.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Infrastructure
+ listKind: InfrastructureList
+ plural: infrastructures
+ singular: infrastructure
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Infrastructure holds cluster-wide information about Infrastructure. The
+ canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ cloudConfig:
+ description: cloudConfig is a reference to a ConfigMap containing the
+ cloud provider configuration file. This configuration file is used
+ to configure the Kubernetes cloud provider integration when using
+ the built-in cloud provider integration or the external cloud controller
+ manager. The namespace for this config map is openshift-config.
+ type: object
+ properties:
+ key:
+ description: Key allows pointing to a specific key/value inside
+ of the configmap. This is useful for logical file references.
+ type: string
+ name:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ apiServerInternalURI:
+ description: apiServerInternalURI is a valid URI with scheme (http/https),
+ address and port. apiServerInternalURI can be used by components
+ like kubelets to contact the Kubernetes API server using the infrastructure
+ provider rather than Kubernetes networking.
+ type: string
+ apiServerURL:
+ description: apiServerURL is a valid URI with scheme (http/https), address
+ and port. apiServerURL can be used by components like the web console
+ to tell users where to find the Kubernetes API.
+ type: string
+ etcdDiscoveryDomain:
+ description: 'etcdDiscoveryDomain is the domain used to fetch the SRV
+ records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery'
+ type: string
+ infrastructureName:
+ description: infrastructureName uniquely identifies a cluster with a
+ human friendly name. Once set it should not be changed. Must be of
+ max length 27 and must have only alphanumeric or hyphen characters.
+ type: string
+ platform:
+ description: "platform is the underlying infrastructure provider for
+ the cluster. \n Deprecated: Use platformStatus.type instead."
+ type: string
+ platformStatus:
+ description: platformStatus holds status information specific to the
+ underlying infrastructure provider.
+ type: object
+ properties:
+ aws:
+ description: AWS contains settings specific to the Amazon Web Services
+ infrastructure provider.
+ type: object
+ properties:
+ region:
+ description: region holds the default AWS region for new AWS
+ resources created by the cluster.
+ type: string
+ azure:
+ description: Azure contains settings specific to the Azure infrastructure
+ provider.
+ type: object
+ properties:
+ networkResourceGroupName:
+ description: networkResourceGroupName is the Resource Group
+ for network resources like the Virtual Network and Subnets
+ used by the cluster. If empty, the value is same as ResourceGroupName.
+ type: string
+ resourceGroupName:
+ description: resourceGroupName is the Resource Group for new
+ Azure resources created for the cluster.
+ type: string
+ baremetal:
+ description: BareMetal contains settings specific to the BareMetal
+ platform.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for BareMetal deployments. In
+ order to minimize necessary changes to the datacenter DNS,
+ a DNS service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ gcp:
+ description: GCP contains settings specific to the Google Cloud
+ Platform infrastructure provider.
+ type: object
+ properties:
+ projectID:
+ description: projectID is the Project ID for new GCP
+ resources created for the cluster.
+ type: string
+ region:
+ description: region holds the region for new GCP resources created
+ for the cluster.
+ type: string
+ openstack:
+ description: OpenStack contains settings specific to the OpenStack
+ infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ cloudName:
+ description: cloudName is the name of the desired OpenStack
+ cloud in the client configuration file (`clouds.yaml`).
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for OpenStack deployments. In
+ order to minimize necessary changes to the datacenter DNS,
+ a DNS service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ ovirt:
+ description: Ovirt contains settings specific to the oVirt infrastructure
+ provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for oVirt deployments. In order
+ to minimize necessary changes to the datacenter DNS, a DNS
+ service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ type:
+ description: type is the underlying infrastructure provider for
+ the cluster. This value controls whether infrastructure automation
+ such as service load balancers, dynamic volume provisioning, machine
+ creation and deletion, and other integrations are enabled. If
+ None, no infrastructure automation is enabled. Allowed values
+ are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack",
+ "VSphere", "oVirt", and "None". Individual components may not
+ support all platforms, and must handle unrecognized platforms
+ as None if they do not support that platform.
+ type: string
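A minimal, hedged sketch of the spec side of this resource (status is populated by the cluster and its operators, per the schema above); the ConfigMap name and key are hypothetical.

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  # reference to the cloud provider configuration file in openshift-config
  cloudConfig:
    name: cloud-provider-config   # hypothetical ConfigMap name
    key: config                   # hypothetical key inside that ConfigMap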
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
new file mode 100644
index 000000000..ada440425
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
@@ -0,0 +1,55 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ingresses.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Ingress
+ listKind: IngressList
+ plural: ingresses
+ singular: ingress
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Ingress holds cluster-wide information about ingress, including
+ the default ingress domain used for routes. The canonical name is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ domain:
+ description: "domain is used to generate a default host name for a route
+ when the route's host name is empty. The generated host name will
+ follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
+ \n It is also used as the default wildcard domain suffix for ingress.
+ The default ingresscontroller domain will follow this pattern: \"*.<domain>\".
+ \n Once set, changing domain is not currently supported."
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
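A hedged minimal example of the Ingress config described above; the domain is a placeholder.

apiVersion: config.openshift.io/v1
kind: Ingress
metadata:
  name: cluster
spec:
  # routes with an empty host get "<route-name>.<route-namespace>.<domain>",
  # and the default ingresscontroller domain becomes "*.<domain>"
  domain: apps.example.com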
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
new file mode 100644
index 000000000..bc3b62a87
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
@@ -0,0 +1,141 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Network
+ listKind: NetworkList
+ plural: networks
+ singular: network
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: 'Network holds cluster-wide information about Network. The canonical
+ name is `cluster`. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin, etc. Please
+ view network.spec for an explanation on what applies when configuring this
+ resource.'
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration. As a general
+ rule, this SHOULD NOT be read directly. Instead, you should consume the
+ NetworkStatus, as it indicates the currently deployed configuration. Currently,
+ most spec fields are immutable after installation. Please view the individual
+ ones for further details on each.
+ type: object
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field is immutable
+ after installation.
+ type: array
+ items:
+ description: ClusterNetworkEntry is a contiguous block of IP addresses
+ from which pod IPs are allocated.
+ type: object
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate to each node.
+ type: integer
+ format: int32
+ minimum: 0
+ externalIP:
+ description: externalIP defines configuration for controllers that affect
+ Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.
+ type: object
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from which to automatically
+ assign Service.ExternalIP. These are assigned when the service
+ is of type LoadBalancer. In general, this is only useful for bare-metal
+ clusters. In OpenShift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected by any ExternalIPPolicy
+ rules. Currently, only one entry may be provided.
+ type: array
+ items:
+ type: string
+ policy:
+ description: policy is a set of restrictions applied to the ExternalIP
+ field. If nil or empty, then ExternalIP is not allowed to be set.
+ type: object
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed CIDRs.
+ type: array
+ items:
+ type: string
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed CIDRs.
+ These take precedence over allowedCIDRs.
+ type: array
+ items:
+ type: string
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed (e.g.
+ OpenShiftSDN). This should match a value that the cluster-network-operator
+ understands, or else no networking will be installed. Currently supported
+ values are: OpenShiftSDN. This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only support
+ a single entry here. This field is immutable after installation.
+ type: array
+ items:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs.
+ type: array
+ items:
+ description: ClusterNetworkEntry is a contiguous block of IP addresses
+ from which pod IPs are allocated.
+ type: object
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate to each node.
+ type: integer
+ format: int32
+ minimum: 0
+ clusterNetworkMTU:
+ description: ClusterNetworkMTU is the MTU for inter-pod networking.
+ type: integer
+ networkType:
+ description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only support
+ a single entry here.
+ type: array
+ items:
+ type: string
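A hedged sketch of the spec for this resource; the address pools are illustrative, and, as the description notes, consumers should read the status rather than the spec.

apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  clusterNetwork:
  - cidr: 10.128.0.0/14      # pod IP pool (immutable after installation)
    hostPrefix: 23           # per-node block size
  serviceNetwork:
  - 172.30.0.0/16            # only a single entry is currently supported
  networkType: OpenShiftSDN
  externalIP:
    policy:
      allowedCIDRs:
      - 192.0.2.0/24         # illustrative allowed range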
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
new file mode 100644
index 000000000..fd763d047
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
@@ -0,0 +1,661 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: oauths.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: OAuth
+ listKind: OAuthList
+ plural: oauths
+ singular: oauth
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: OAuth holds cluster-wide information about OAuth. The canonical
+ name is `cluster`. It is used to configure the integrated OAuth server. This
+ configuration is only honored when the top level Authentication config has
+ type set to IntegratedOAuth.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OAuthSpec contains desired cluster auth configuration
+ type: object
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways for a user
+ to identify themselves. When this list is empty, no identities are
+ provisioned for users.
+ type: array
+ items:
+ description: IdentityProvider provides identities for users authenticating
+ using credentials
+ type: object
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options for the
+ BasicAuth IdP
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference to a secret
+ by name that contains the PEM-encoded TLS client certificate
+ to present when connecting to the server. The key "tls.crt"
+ is used to locate the data. If specified and the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified certificate data is not valid,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsClientKey:
+ description: tlsClientKey is an optional reference to a secret
+ by name that contains the PEM-encoded TLS private key for
+ the client certificate referenced in tlsClientCert. The
+ key "tls.key" is used to locate the data. If specified and
+ the secret or expected key is not found, the identity provider
+ is not honored. If the specified certificate data is not
+ valid, the identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ github:
+ description: github enables user authentication using GitHub credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ This can only be configured when hostname is set to a non-empty
+ value. The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostname:
+ description: hostname is the optional domain (e.g. "mycompany.com")
+ for use with a hosted instance of GitHub Enterprise. It
+ must match the GitHub Enterprise settings value configured
+ at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts which organizations
+ are allowed to log in
+ type: array
+ items:
+ type: string
+ teams:
+ description: teams optionally restricts which teams are allowed
+ to log in. Format is <org>/<team>.
+ type: array
+ items:
+ type: string
+ gitlab:
+ description: gitlab enables user authentication using GitLab credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the oauth server base URL
+ type: string
+ google:
+ description: google enables user authentication using Google credentials
+ type: object
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostedDomain:
+ description: hostedDomain is the optional Google App domain
+ (e.g. "mycompany.com") to restrict logins to
+ type: string
+ htpasswd:
+ description: htpasswd enables user authentication using an HTPasswd
+ file to validate credentials
+ type: object
+ properties:
+ fileData:
+ description: fileData is a required reference to a secret
+ by name containing the data to use as the htpasswd file.
+ The key "htpasswd" is used to locate the data. If the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not valid, the
+ identity provider is not honored. The namespace for this
+ secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ keystone:
+ description: keystone enables user authentication using keystone
+ password credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ domainName:
+ description: domainName is required for keystone v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference to a secret
+ by name that contains the PEM-encoded TLS client certificate
+ to present when connecting to the server. The key "tls.crt"
+ is used to locate the data. If specified and the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified certificate data is not valid,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsClientKey:
+ description: tlsClientKey is an optional reference to a secret
+ by name that contains the PEM-encoded TLS private key for
+ the client certificate referenced in tlsClientCert. The
+ key "tls.key" is used to locate the data. If specified and
+ the secret or expected key is not found, the identity provider
+ is not honored. If the specified certificate data is not
+ valid, the identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ ldap:
+ description: ldap enables user authentication using LDAP credentials
+ type: object
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to identities
+ type: object
+ properties:
+ email:
+ description: email is the list of attributes whose values
+ should be used as the email address. Optional. If unspecified,
+ no email is set for the identity
+ type: array
+ items:
+ type: string
+ id:
+ description: id is the list of attributes whose values
+ should be used as the user ID. Required. First non-empty
+ attribute is used. At least one attribute is required.
+ If none of the listed attributes have a value, authentication
+ fails. The LDAP standard identity attribute is "dn".
+ type: array
+ items:
+ type: string
+ name:
+ description: name is the list of attributes whose values
+ should be used as the display name. Optional. If unspecified,
+ no display name is set for the identity. The LDAP standard
+ display name attribute is "cn".
+ type: array
+ items:
+ type: string
+ preferredUsername:
+ description: preferredUsername is the list of attributes
+ whose values should be used as the preferred username.
+ LDAP standard login attribute is "uid"
+ type: array
+ items:
+ type: string
+ bindDN:
+ description: bindDN is an optional DN to bind with during
+ the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference to a secret
+ by name containing a password to bind with during the search
+ phase. The key "bindPassword" is used to locate the data.
+ If specified and the secret or expected key is not found,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ insecure:
+ description: 'insecure, if true, indicates the connection
+ should not use TLS. WARNING: Should not be set to `true`
+ with the URL scheme "ldaps://", as "ldaps://" URLs always attempt
+ to connect using TLS, even when `insecure` is set to `true`.
+ When `true`, "ldap://" URLs connect insecurely. When `false`,
+ "ldap://" URLs are upgraded to a TLS connection using StartTLS
+ as specified in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies the LDAP
+ search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ mappingMethod:
+ description: mappingMethod determines how identities from this
+ provider are mapped to users. Defaults to "claim".
+ type: string
+ name:
+ description: 'name is used to qualify the identities returned
+ by this provider. - It MUST be unique and not shared by any
+ other identity provider used - It MUST be a valid path segment:
+ name cannot equal "." or ".." or contain "/" or "%" or ":" Ref:
+ https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using OpenID credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ claims:
+ description: claims mappings
+ type: object
+ properties:
+ email:
+ description: email is the list of claims whose values
+ should be used as the email address. Optional. If unspecified,
+ no email is set for the identity
+ type: array
+ items:
+ type: string
+ name:
+ description: name is the list of claims whose values should
+ be used as the display name. Optional. If unspecified,
+ no display name is set for the identity
+ type: array
+ items:
+ type: string
+ preferredUsername:
+ description: preferredUsername is the list of claims whose
+ values should be used as the preferred username. If
+ unspecified, the preferred username is determined from
+ the value of the sub claim
+ type: array
+ items:
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ extraAuthorizeParameters:
+ description: extraAuthorizeParameters are any custom parameters
+ to add to the authorize request.
+ type: object
+ additionalProperties:
+ type: string
+ extraScopes:
+ description: extraScopes are any scopes to request in addition
+ to the standard "openid" scope.
+ type: array
+ items:
+ type: string
+ issuer:
+ description: issuer is the URL that the OpenID Provider asserts
+ as its Issuer Identifier. It must use the https scheme with
+ no query or fragment component.
+ type: string
+ requestHeader:
+ description: requestHeader enables user authentication using request
+ header credentials
+ type: object
+ properties:
+ ca:
+ description: ca is a required reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. Specifically, it allows verification
+ of incoming requests to prevent header spoofing. The key
+ "ca.crt" is used to locate the data. If the config map or
+ expected key is not found, the identity provider is not
+ honored. If the specified ca data is not valid, the identity
+ provider is not honored. The namespace for this config map
+ is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ challengeURL:
+ description: challengeURL is a URL to redirect unauthenticated
+ /authorize requests to. Unauthenticated requests from OAuth
+ clients which expect WWW-Authenticate challenges will be
+ redirected here. ${url} is replaced with the current URL,
+ escaped to be safe in a query parameter (e.g. https://www.example.com/sso-login?then=${url});
+ ${query} is replaced with the current query string (e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}).
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional list of common
+ names to require a match from. If empty, any client certificate
+ validated against the clientCA bundle is considered authoritative.
+ type: array
+ items:
+ type: string
+ emailHeaders:
+ description: emailHeaders is the set of headers to check for
+ the email address
+ type: array
+ items:
+ type: string
+ headers:
+ description: headers is the set of headers to check for identity
+ information
+ type: array
+ items:
+ type: string
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to. Unauthenticated requests from OAuth
+ clients which expect interactive logins will be redirected
+ here. ${url} is replaced with the current URL, escaped to
+ be safe in a query parameter (e.g. https://www.example.com/sso-login?then=${url});
+ ${query} is replaced with the current query string (e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}).
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers to check for
+ the display name
+ type: array
+ items:
+ type: string
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set of headers
+ to check for the preferred username
+ type: array
+ items:
+ type: string
+ type:
+ description: type identifies the identity provider type for this
+ entry.
+ type: string
+ templates:
+ description: templates allow you to customize pages like the login page.
+ type: object
+ properties:
+ error:
+ description: error is the name of a secret that specifies a go template
+ to use to render error pages during the authentication or grant
+ flow. The key "errors.html" is used to locate the template data.
+ If specified and the secret or expected key is not found, the
+ default error page is used. If the specified template is not valid,
+ the default error page is used. If unspecified, the default error
+ page is used. The namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ login:
+ description: login is the name of a secret that specifies a go template
+ to use to render the login page. The key "login.html" is used
+ to locate the template data. If specified and the secret or expected
+ key is not found, the default login page is used. If the specified
+ template is not valid, the default login page is used. If unspecified,
+ the default login page is used. The namespace for this secret
+ is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ providerSelection:
+ description: providerSelection is the name of a secret that specifies
+ a go template to use to render the provider selection page. The
+ key "providers.html" is used to locate the template data. If specified
+ and the secret or expected key is not found, the default provider
+ selection page is used. If the specified template is not valid,
+ the default provider selection page is used. If unspecified, the
+ default provider selection page is used. The namespace for this
+ secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ tokenConfig:
+ description: tokenConfig contains options for authorization and access
+ tokens
+ type: object
+ properties:
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds defines the default
+ token inactivity timeout for tokens granted by any client. The
+ value represents the maximum amount of time that can occur between
+ consecutive uses of the token. Tokens become invalid if they are
+ not used within this temporal window. The user will need to acquire
+ a new token to regain access once a token times out. Valid values
+ are integers: x < 0 (e.g. `-1`) enables token timeout but tokens
+ never time out unless configured per client; x = 0 disables token
+ timeout (default); x > 0 makes tokens time out if there is no
+ activity for x seconds. The current minimum allowed value for
+ x is 300 (5 minutes)'
+ type: integer
+ format: int32
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum age of
+ access tokens
+ type: integer
+ format: int32
+ status:
+ description: OAuthStatus shows current known state of OAuth server in the
+ cluster
+ type: object
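A hedged sketch of one way to fill in this resource, using an HTPasswd identity provider; the Secret name is hypothetical, and the type string HTPasswd is assumed, since the CRD above does not enumerate the provider type values.

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: local-users            # must be a valid path segment, unique per provider
    mappingMethod: claim         # the documented default
    type: HTPasswd               # assumed provider type string
    htpasswd:
      fileData:
        name: htpasswd-secret    # hypothetical Secret in openshift-config, key "htpasswd"
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400
    accessTokenInactivityTimeoutSeconds: 300   # 300 is the documented minimum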
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
new file mode 100644
index 000000000..a625aa617
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
@@ -0,0 +1,63 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: projects.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ names:
+ kind: Project
+ listKind: ProjectList
+ plural: projects
+ singular: project
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Project holds cluster-wide information about Project. The canonical
+ name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ projectRequestMessage:
+ description: projectRequestMessage is the string presented to a user
+ if they are unable to request a project via the projectrequest api
+ endpoint
+ type: string
+ projectRequestTemplate:
+ description: projectRequestTemplate is the template to use for creating
+ projects in response to projectrequest. This must point to a template
+ in 'openshift-config' namespace. It is optional. If it is not specified,
+ a default template is used.
+ type: object
+ properties:
+ name:
+ description: name is the metadata.name of the referenced project
+ request template
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
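A hedged minimal Project config matching the spec above; the template name is hypothetical.

apiVersion: config.openshift.io/v1
kind: Project
metadata:
  name: cluster
spec:
  projectRequestMessage: Contact your administrator to request a new project.
  projectRequestTemplate:
    name: project-request       # hypothetical template in the openshift-config namespace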
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
new file mode 100644
index 000000000..6f5336c8f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
@@ -0,0 +1,88 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: schedulers.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Scheduler
+ singular: scheduler
+ plural: schedulers
+ listKind: SchedulerList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Scheduler holds cluster-wide config information to run the Kubernetes
+ Scheduler and influence its placement decisions. The canonical name for this
+ config is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide default
+ node selector to restrict pod placement to specific nodes. This is
+ applied to the pods created in all namespaces without a specified
+ nodeSelector value. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having project-wide
+ node selectors won''t be impacted even if this field is set. This
+ adds an annotation section to the namespace. For example, if a new
+ namespace is created with node-selector=''type=user-node,region=east'',
+ the annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector annotation
+ is set on the project the value is used in preference to the value
+ we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector:
+ "type=user-node,region=west" means that the default of "type=user-node,region=east"
+ set in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows master nodes to be schedulable.
+ When this flag is turned on, all the master nodes in the cluster will
+ be made schedulable, so that workload pods can run on them. The default
+ value for this field is false, meaning none of the master nodes are
+ schedulable. Important Note: Once the workload pods start running
+ on the master nodes, extreme care must be taken to ensure that cluster-critical
+ control plane components are not impacted. Please turn on this field
+ after doing due diligence.'
+ type: boolean
+ policy:
+ description: policy is a reference to a ConfigMap containing scheduler
+ policy which has user specified predicates and priorities. If this
+ ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider.
+ The namespace for this configmap is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
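A minimal sketch of this config; the node selector reuses the example from the description above and the ConfigMap name is hypothetical.

apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
  name: cluster
spec:
  defaultNodeSelector: type=user-node,region=east   # applied to pods in namespaces without their own selector
  mastersSchedulable: false                          # keep workloads off the control plane (the default)
  policy:
    name: scheduler-policy                           # hypothetical ConfigMap in openshift-config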
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
index ca36f6777..142748423 100644
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -167,6 +167,7 @@ type AdmissionPluginConfig struct {
// Configuration is an embedded configuration object to be used as the plugin's
// configuration. If present, it will be used instead of the path to the configuration file.
// +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
Configuration runtime.RawExtension `json:"configuration"`
}
@@ -210,6 +211,7 @@ type AuditConfig struct {
// as the audit policy configuration. If present, it will be used instead of
// the path to the policy file.
// +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
// Format of saved audits (legacy or json).
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
index ea76aec02..b347bd80e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -39,6 +39,16 @@ type APIServerSpec struct {
// The values are regular expressions that correspond to the Golang regular expression language.
// +optional
AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+ // encryption allows the configuration of encryption of resources at the datastore layer.
+ // +optional
+ Encryption APIServerEncryption `json:"encryption"`
+ // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
+ //
+ // If unset, a default (which may change between releases) is chosen. Note that only Old and
+ // Intermediate profiles are currently supported, and the maximum available MinTLSVersions
+ // is VersionTLS12.
+ // +optional
+ TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
}
type APIServerServingCerts struct {
@@ -63,6 +73,39 @@ type APIServerNamedServingCert struct {
ServingCertificate SecretNameReference `json:"servingCertificate"`
}
+type APIServerEncryption struct {
+ // type defines what encryption type should be used to encrypt resources at the datastore layer.
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied.
+ // The behavior of unset can and will change over time. Even if encryption is enabled by default,
+ // the meaning of unset may change to a different encryption type based on changes in best practices.
+ //
+ // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
+ // This list of sensitive resources can and will change over time. The current authoritative list is:
+ //
+ // 1. secrets
+ // 2. configmaps
+ // 3. routes.route.openshift.io
+ // 4. oauthaccesstokens.oauth.openshift.io
+ // 5. oauthauthorizetokens.oauth.openshift.io
+ //
+ // +unionDiscriminator
+ // +optional
+ Type EncryptionType `json:"type,omitempty"`
+}
+
+// +kubebuilder:validation:Enum="";identity;aescbc
+type EncryptionType string
+
+const (
+ // identity refers to a type where no encryption is performed at the datastore layer.
+ // Resources are written as-is without encryption.
+ EncryptionTypeIdentity EncryptionType = "identity"
+
+ // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESCBC EncryptionType = "aescbc"
+)
+
type APIServerStatus struct {
}
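These new Go fields surface in the corresponding API server config resource; below is a hedged YAML sketch of turning on datastore encryption. The kind name APIServer and the singleton name cluster are assumed from the surrounding conventions, and tlsSecurityProfile is omitted because its shape is not shown in this diff.

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  encryption:
    type: aescbc        # valid values per the enum above: "", identity, aescbc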
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
index 0722ddbfc..eecfe75e7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_authentication.go
+++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go
@@ -9,8 +9,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Authentication specifies cluster-wide settings for authentication (like OAuth and
// webhook token authenticators). The canonical name of an instance is `cluster`.
type Authentication struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -77,7 +76,6 @@ type AuthenticationStatus struct {
type AuthenticationList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
Items []Authentication `json:"items"`
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
index c7ed7e958..ef4512aa1 100644
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -9,10 +9,14 @@ import (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`
+// Build configures the behavior of OpenShift builds for the entire cluster.
+// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
+//
+// The canonical name is "cluster"
type Build struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
+
// Spec holds user-settable values for the build controller configuration
// +kubebuilder:validation:Required
// +required
@@ -23,6 +27,10 @@ type BuildSpec struct {
// AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
// should be trusted for image pushes and pulls during builds.
// The namespace for this config map is openshift-config.
+ //
+ // DEPRECATED: Additional CAs for image pull and push should be set on
+ // image.config.openshift.io/cluster instead.
+ //
// +optional
AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
// BuildDefaults controls the default information for Builds
@@ -95,7 +103,7 @@ type BuildOverrides struct {
type BuildList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Build `json:"items"`
+
+ Items []Build `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index af2ce846c..3681d0ff0 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -16,13 +16,13 @@ type ClusterOperator struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
- // spec hold the intent of how this operator should behave.
+ // spec holds configuration that could apply to any operator.
// +kubebuilder:validation:Required
// +required
Spec ClusterOperatorSpec `json:"spec"`
// status holds the information about the state of an operator. It is consistent with status information across
- // the kube ecosystem.
+ // the Kubernetes ecosystem.
// +optional
Status ClusterOperatorStatus `json:"status"`
}
@@ -34,15 +34,15 @@ type ClusterOperatorSpec struct {
// ClusterOperatorStatus provides information about the status of the operator.
// +k8s:deepcopy-gen=true
type ClusterOperatorStatus struct {
- // conditions describes the state of the operator's reconciliation functionality.
+ // conditions describes the state of the operator's managed and monitored components.
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
- // versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple
- // entries in the array. If an operator is Available, it must have at least one entry. You must report the version of
- // the operator itself with the name "operator".
+ // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
+ // operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
+ // An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
// +optional
Versions []OperandVersion `json:"versions,omitempty"`
@@ -57,29 +57,40 @@ type ClusterOperatorStatus struct {
// operator which owns this status object.
// +nullable
// +optional
+ // +kubebuilder:pruning:PreserveUnknownFields
Extension runtime.RawExtension `json:"extension"`
}
type OperandVersion struct {
// name is the name of the particular operand this version is for. It usually matches container images, not operators.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
- // version indicates which version of a particular operand is currently being manage. It must always match the Available
- // condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
+ // version indicates which version of a particular operand is currently being managed. It must always match the Available
+ // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
// 1.1.0
+ // +kubebuilder:validation:Required
+ // +required
Version string `json:"version"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
// group of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Group string `json:"group"`
// resource of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Resource string `json:"resource"`
// namespace of the referent.
// +optional
Namespace string `json:"namespace,omitempty"`
// name of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
}
@@ -96,41 +107,64 @@ const (
)
// ClusterOperatorStatusCondition represents the state of the operator's
-// reconciliation functionality.
+// managed and monitored components.
// +k8s:deepcopy-gen=true
type ClusterOperatorStatusCondition struct {
- // type specifies the state of the operator's reconciliation functionality.
+ // type specifies the aspect reported by this condition.
+ // +kubebuilder:validation:Required
+ // +required
Type ClusterStatusConditionType `json:"type"`
// status of the condition, one of True, False, Unknown.
+ // +kubebuilder:validation:Required
+ // +required
Status ConditionStatus `json:"status"`
- // lastTransitionTime is the time of the last update to the current status object.
+ // lastTransitionTime is the time of the last update to the current status property.
+ // +kubebuilder:validation:Required
+ // +required
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
- // reason is the reason for the condition's last transition. Reasons are CamelCase
+ // reason is the CamelCase reason for the condition's current status.
+ // +optional
Reason string `json:"reason,omitempty"`
// message provides additional information about the current condition.
// This is only to be consumed by humans.
+ // +optional
Message string `json:"message,omitempty"`
}
-// ClusterStatusConditionType is the state of the operator's reconciliation functionality.
+// ClusterStatusConditionType is an aspect of operator state.
type ClusterStatusConditionType string
const (
- // Available indicates that the binary maintained by the operator (eg: openshift-apiserver for the
+ // Available indicates that the operand (eg: openshift-apiserver for the
// openshift-apiserver-operator), is functional and available in the cluster.
OperatorAvailable ClusterStatusConditionType = "Available"
- // Progressing indicates that the operator is actively making changes to the binary maintained by the
- // operator (eg: openshift-apiserver for the openshift-apiserver-operator).
+ // Progressing indicates that the operator is actively rolling out new code,
+ // propagating config changes, or otherwise moving from one steady state to
+ // another. Operators should not report progressing when they are reconciling
+ // a previously known state.
OperatorProgressing ClusterStatusConditionType = "Progressing"
- // Degraded indicates that the operand is not functioning completely. An example of a degraded state
- // would be if there should be 5 copies of the operand running but only 4 are running. It may still be available,
- // but it is degraded
+ // Degraded indicates that the operator's current state does not match its
+ // desired state over a period of time resulting in a lower quality of service.
+ // The period of time may vary by component, but a Degraded state represents
+ // persistent observation of a condition. As a result, a component should not
+ // oscillate in and out of Degraded state. A service may be Available even
+ // if it's degraded. For example, your service may desire 3 running pods, but 1
+ // pod is crash-looping. The service is Available but Degraded because it
+ // may have a lower quality of service. A component may be Progressing but
+ // not Degraded because the transition from one state to another does not
+ // persist over a long enough period to report Degraded. A service should not
+ // report Degraded during the course of a normal upgrade. A service may report
+ // Degraded in response to a persistent infrastructure failure that requires
+ // administrator intervention. For example, if a control plane host is unhealthy
+ // and must be replaced. An operator should report Degraded if unexpected
+ // errors occur over a period, but the expectation is that all unexpected errors
+ // are handled as operators mature.
OperatorDegraded ClusterStatusConditionType = "Degraded"
// Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`
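The condition semantics above allow Available, Progressing, and Degraded to be reported independently. A small sketch using the types from this file plus a hypothetical newCondition helper that is not part of the API:

package main

import (
	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newCondition is a hypothetical helper; only the field names come from the API.
func newCondition(t configv1.ClusterStatusConditionType, s configv1.ConditionStatus, reason, msg string) configv1.ClusterOperatorStatusCondition {
	return configv1.ClusterOperatorStatusCondition{
		Type:               t,
		Status:             s,
		LastTransitionTime: metav1.Now(),
		Reason:             reason, // CamelCase, machine readable
		Message:            msg,    // free-form, for humans
	}
}

func main() {
	// Per the comments above, an operand that is serving traffic but has one
	// crash-looping replica can be Available=True and Degraded=True at the same time.
	_ = []configv1.ClusterOperatorStatusCondition{
		newCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "operand is serving requests"),
		newCondition(configv1.OperatorDegraded, configv1.ConditionTrue, "PodCrashLooping", "1 of 3 replicas is crash-looping"),
	}
}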
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
index c6c2e7e43..771e962ad 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -33,6 +33,8 @@ type ClusterVersionSpec struct {
// clusterID uniquely identifies this cluster. This is expected to be
// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
// hexadecimal values). This is a required field.
+ // +kubebuilder:validation:Required
+ // +required
ClusterID ClusterID `json:"clusterID"`
// desiredUpdate is an optional field that indicates the desired value of
@@ -80,6 +82,8 @@ type ClusterVersionStatus struct {
// desired is the version that the cluster is reconciling towards.
// If the cluster is not yet fully initialized desired will be set
// with the information available, which may be an image or a tag.
+ // +kubebuilder:validation:Required
+ // +required
Desired Update `json:"desired"`
// history contains a list of the most recent versions applied to the cluster.
@@ -94,12 +98,16 @@ type ClusterVersionStatus struct {
// observedGeneration reports which version of the spec is being synced.
// If this value is not equal to metadata.generation, then the desired
- // and conditions fields may represent from a previous version.
+ // and conditions fields may represent a previous version.
+ // +kubebuilder:validation:Required
+ // +required
ObservedGeneration int64 `json:"observedGeneration"`
// versionHash is a fingerprint of the content that the cluster will be
// updated with. It is used by the operator to avoid unnecessary work
// and is for internal use only.
+ // +kubebuilder:validation:Required
+ // +required
VersionHash string `json:"versionHash"`
// conditions provides information about the cluster version. The condition
@@ -117,6 +125,8 @@ type ClusterVersionStatus struct {
// if the update service is unavailable, or if an invalid channel has
// been specified.
// +nullable
+ // +kubebuilder:validation:Required
+ // +required
AvailableUpdates []Update `json:"availableUpdates"`
}
@@ -139,14 +149,20 @@ type UpdateHistory struct {
// indicates the update is not fully applied, while the Completed state
// indicates the update was successfully rolled out at least once (all
// parts of the update successfully applied).
+ // +kubebuilder:validation:Required
+ // +required
State UpdateState `json:"state"`
// startedTime is the time at which the update was started.
+ // +kubebuilder:validation:Required
+ // +required
StartedTime metav1.Time `json:"startedTime"`
// completionTime, if set, is when the update was fully applied. The update
// that is currently being applied will have a null completion time.
// Completion time will always be set for entries that are not the current
// update (usually to the started time of the next update).
+ // +kubebuilder:validation:Required
+ // +required
// +nullable
CompletionTime *metav1.Time `json:"completionTime"`
@@ -158,9 +174,13 @@ type UpdateHistory struct {
Version string `json:"version"`
// image is a container image location that contains the update. This value
// is always populated.
+ // +kubebuilder:validation:Required
+ // +required
Image string `json:"image"`
// verified indicates whether the provided update was properly verified
// before it was installed. If this is false the cluster may not be trusted.
+ // +kubebuilder:validation:Required
+ // +required
Verified bool `json:"verified"`
}
@@ -172,19 +192,29 @@ type ClusterID string
// +k8s:deepcopy-gen=true
type ComponentOverride struct {
// kind identifies which object to override.
+ // +kubebuilder:validation:Required
+ // +required
Kind string `json:"kind"`
// group identifies the API group that the kind is in.
+ // +kubebuilder:validation:Required
+ // +required
Group string `json:"group"`
// namespace is the component's namespace. If the resource is cluster
// scoped, the namespace should be empty.
+ // +kubebuilder:validation:Required
+ // +required
Namespace string `json:"namespace"`
// name is the component's name.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
// unmanaged controls if cluster version operator should stop managing the
// resources in this cluster.
// Default: false
+ // +kubebuilder:validation:Required
+ // +required
Unmanaged bool `json:"unmanaged"`
}
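For illustration, a ComponentOverride entry exercising the required fields annotated above; the values are hypothetical, and entries like this go into the ClusterVersion spec's overrides list.

package main

import configv1 "github.com/openshift/api/config/v1"

func main() {
	override := configv1.ComponentOverride{
		Kind:      "Deployment",
		Group:     "apps",
		Namespace: "openshift-example", // hypothetical namespace
		Name:      "example-operator",  // hypothetical component
		Unmanaged: true,                // ask the cluster-version operator to stop managing it
	}
	_ = override
}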
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
index 9cda3f83b..22b0b5160 100644
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -10,8 +10,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// logout URL, and reports the public URL of the console. The canonical name is
// `cluster`.
type Console struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -40,9 +39,9 @@ type ConsoleStatus struct {
type ConsoleList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Console `json:"items"`
+
+ Items []Console `json:"items"`
}
// ConsoleAuthentication defines a list of optional configuration for console authentication.
@@ -58,6 +57,6 @@ type ConsoleAuthentication struct {
// provides the user the option to perform single logout (SLO) through the identity
// provider to destroy their single sign-on session.
// +optional
- // +kubebuilder:validation:Pattern=^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
+ // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$`
LogoutRedirect string `json:"logoutRedirect,omitempty"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
index ef04f7a67..989ef99c3 100644
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
type DNS struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -82,7 +81,7 @@ type DNSStatus struct {
type DNSList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []DNS `json:"items"`
+
+ Items []DNS `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
index 536bad191..ce9012627 100644
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Feature holds cluster-wide information about feature gates. The canonical name is `cluster`
type FeatureGate struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -35,6 +34,9 @@ var (
// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
// your cluster may fail in an unrecoverable way.
CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+
+ // TopologyManager enables TopologyManager support. Upgrades are enabled with this feature.
+ LatencySensitive FeatureSet = "LatencySensitive"
)
type FeatureGateSpec struct {
@@ -73,9 +75,9 @@ type FeatureGateStatus struct {
type FeatureGateList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []FeatureGate `json:"items"`
+
+ Items []FeatureGate `json:"items"`
}
type FeatureGateEnabledDisabled struct {
@@ -95,24 +97,98 @@ type FeatureGateEnabledDisabled struct {
//
// If you put an item in either of these lists, put your area and name on it so we can find owners.
var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
- Default: {
- Enabled: []string{
- "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- },
- Disabled: []string{
- "LocalStorageCapacityIsolation", // sig-pod, sjenning
- },
+ Default: defaultFeatures,
+ CustomNoUpgrade: {
+ Enabled: []string{},
+ Disabled: []string{},
+ },
+ TechPreviewNoUpgrade: newDefaultFeatures().toFeatures(),
+ LatencySensitive: newDefaultFeatures().
+ with(
+ "TopologyManager", // sig-pod, sjenning
+ ).
+ toFeatures(),
+}
+
+var defaultFeatures = &FeatureGateEnabledDisabled{
+ Enabled: []string{
+ "RotateKubeletServerCertificate", // sig-pod, sjenning
+ "SupportPodPidsLimit", // sig-pod, sjenning
+ "NodeDisruptionExclusion", // sig-scheduling, ccoleman
+ "ServiceNodeExclusion", // sig-scheduling, ccoleman
+ "SCTPSupport", // sig-network, ccallend
},
- TechPreviewNoUpgrade: {
- Enabled: []string{
- "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- },
- Disabled: []string{
- "LocalStorageCapacityIsolation", // sig-pod, sjenning
- },
+ Disabled: []string{
+ "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman
},
}
+
+type featureSetBuilder struct {
+ forceOn []string
+ forceOff []string
+}
+
+func newDefaultFeatures() *featureSetBuilder {
+ return &featureSetBuilder{}
+}
+
+func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder {
+ f.forceOn = append(f.forceOn, forceOn...)
+ return f
+}
+
+func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder {
+ f.forceOff = append(f.forceOff, forceOff...)
+ return f
+}
+
+func (f *featureSetBuilder) isForcedOff(needle string) bool {
+ for _, forcedOff := range f.forceOff {
+ if needle == forcedOff {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *featureSetBuilder) isForcedOn(needle string) bool {
+ for _, forceOn := range f.forceOn {
+ if needle == forceOn {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled {
+ finalOn := []string{}
+ finalOff := []string{}
+
+ // only add the default enabled features if they haven't been explicitly set off
+ for _, defaultOn := range defaultFeatures.Enabled {
+ if !f.isForcedOff(defaultOn) {
+ finalOn = append(finalOn, defaultOn)
+ }
+ }
+ for _, currOn := range f.forceOn {
+ if f.isForcedOff(currOn) {
+ panic("coding error, you can't have features both on and off")
+ }
+ finalOn = append(finalOn, currOn)
+ }
+
+ // only add the default disabled features if they haven't been explicitly set on
+ for _, defaultOff := range defaultFeatures.Disabled {
+ if !f.isForcedOn(defaultOff) {
+ finalOff = append(finalOff, defaultOff)
+ }
+ }
+ for _, currOff := range f.forceOff {
+ finalOff = append(finalOff, currOff)
+ }
+
+ return &FeatureGateEnabledDisabled{
+ Enabled: finalOn,
+ Disabled: finalOff,
+ }
+}
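The builder above is unexported, so consumers only see the resulting FeatureSets map; a sketch of reading it:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	fs := configv1.FeatureSets[configv1.LatencySensitive]
	fmt.Println("enabled:", fs.Enabled)   // the defaults plus TopologyManager
	fmt.Println("disabled:", fs.Disabled) // the defaults that were not forced on
}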
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
index f0cf220d3..bf594c1b7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -6,10 +6,14 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Image holds cluster-wide information about how to handle images. The canonical name is `cluster`
+// Image governs policies related to imagestream imports and runtime configuration
+// for external registries. It allows cluster admins to configure which registries
+// OpenShift is allowed to import images from, extra CA trust bundles for external
+// registries, and policies to blacklist/whitelist registry hostnames.
+// When exposing OpenShift's image registry to the public, this also lets cluster
+// admins specify the external hostname.
type Image struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -22,7 +26,7 @@ type Image struct {
}
type ImageSpec struct {
- // AllowedRegistriesForImport limits the container image registries that normal users may import
+ // allowedRegistriesForImport limits the container image registries that normal users may import
// images from. Set this list to the registries that you trust to contain valid Docker
// images and that you want applications to be able to import from. Users with
// permission to create Images or ImageStreamMappings via the API are not affected by
@@ -38,14 +42,14 @@ type ImageSpec struct {
// +optional
ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
- // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted during imagestream import, pod image pull, and imageregistry
- // pullthrough.
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted during imagestream import, pod image pull, build image pull, and
+ // imageregistry pullthrough.
// The namespace for this config map is openshift-config.
// +optional
AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
- // RegistrySources contains configuration that determines how the container runtime
+ // registrySources contains configuration that determines how the container runtime
// should treat individual registries when accessing images for builds+pods. (e.g.
// whether or not to allow insecure access). It does not contain configuration for the
// internal cluster registry.
@@ -55,10 +59,10 @@ type ImageSpec struct {
type ImageStatus struct {
- // this value is set by the image registry operator which controls the internal registry hostname
- // InternalRegistryHostname sets the hostname for the default internal image
+ // internalRegistryHostname sets the hostname for the default internal image
// registry. The value must be in "hostname[:port]" format.
- // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
+ // This value is set by the image registry operator which controls the internal registry
+ // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
// environment variable but this setting overrides the environment variable.
// +optional
InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
@@ -75,19 +79,19 @@ type ImageStatus struct {
type ImageList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Image `json:"items"`
+
+ Items []Image `json:"items"`
}
// RegistryLocation contains a location of the registry specified by the registry domain
// name. The domain name might include wildcards, like '*' or '??'.
type RegistryLocation struct {
- // DomainName specifies a domain name for the registry
+ // domainName specifies a domain name for the registry
// If the registry uses a non-standard port (i.e. not 80 or 443), the port should be included
// in the domain name as well.
DomainName string `json:"domainName"`
- // Insecure indicates whether the registry is secure (https) or insecure (http)
+ // insecure indicates whether the registry is secure (https) or insecure (http)
// By default (if not specified) the registry is assumed as secure.
// +optional
Insecure bool `json:"insecure,omitempty"`
@@ -95,15 +99,15 @@ type RegistryLocation struct {
// RegistrySources holds cluster-wide information about how to handle the registries config.
type RegistrySources struct {
- // InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.
+ // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.
// +optional
InsecureRegistries []string `json:"insecureRegistries,omitempty"`
- // BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.
+ // blockedRegistries are blacklisted from image pull/push. All other registries are allowed.
//
// Only one of BlockedRegistries or AllowedRegistries may be set.
// +optional
BlockedRegistries []string `json:"blockedRegistries,omitempty"`
- // AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.
+ // allowedRegistries are whitelisted for image pull/push. All other registries are blocked.
//
// Only one of BlockedRegistries or AllowedRegistries may be set.
// +optional
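A sketch of an ImageSpec fragment following the rule above that only one of BlockedRegistries or AllowedRegistries may be set; the registry names are hypothetical.

package main

import configv1 "github.com/openshift/api/config/v1"

func main() {
	spec := configv1.ImageSpec{
		RegistrySources: configv1.RegistrySources{
			InsecureRegistries: []string{"registry.internal.example:5000"}, // HTTP-only registry
			BlockedRegistries:  []string{"bad.registry.example"},           // so AllowedRegistries stays unset
		},
	}
	_ = spec
}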
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 4632e6ada..ac1e5048e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
type Infrastructure struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -130,6 +129,10 @@ type PlatformStatus struct {
// OpenStack contains settings specific to the OpenStack infrastructure provider.
// +optional
OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
}
// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
@@ -142,6 +145,11 @@ type AWSPlatformStatus struct {
type AzurePlatformStatus struct {
// resourceGroupName is the Resource Group for new Azure resources created for the cluster.
ResourceGroupName string `json:"resourceGroupName"`
+
+ // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+ // If empty, the value is same as ResourceGroupName.
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
}
// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
@@ -154,6 +162,8 @@ type GCPPlatformStatus struct {
}
// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
+// For more information about the network architecture used with the BareMetal platform type, see:
+// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
type BareMetalPlatformStatus struct {
// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
// by components inside the cluster, like kubelets using the infrastructure rather
@@ -199,12 +209,33 @@ type OpenStackPlatformStatus struct {
NodeDNSIP string `json:"nodeDNSIP,omitempty"`
}
+// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.
+type OvirtPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // oVirt deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InfrastructureList is a list of Infrastructure.
type InfrastructureList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Infrastructure `json:"items"`
+
+ Items []Infrastructure `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
index 484a1af0b..0216919ad 100644
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -6,11 +6,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`
-// TODO this object is an example of a possible grouping and is subject to change or removal
+// Ingress holds cluster-wide information about ingress, including the default ingress domain
+// used for routes. The canonical name is `cluster`.
type Ingress struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -24,8 +23,13 @@ type Ingress struct {
type IngressSpec struct {
// domain is used to generate a default host name for a route when the
- // route's host name is empty. The generated host name will follow this
+ // route's host name is empty. The generated host name will follow this
// pattern: "<route-name>.<route-namespace>.<domain>".
+ //
+ // It is also used as the default wildcard domain suffix for ingress. The
+ // default ingresscontroller domain will follow this pattern: "*.<domain>".
+ //
+ // Once set, changing domain is not currently supported.
Domain string `json:"domain"`
}
@@ -36,7 +40,7 @@ type IngressStatus struct {
type IngressList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Ingress `json:"items"`
+
+ Items []Ingress `json:"items"`
}
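The default host-name pattern described above can be expressed as a tiny helper; defaultRouteHost is hypothetical and not part of the API.

package main

import "fmt"

// defaultRouteHost applies the "<route-name>.<route-namespace>.<domain>" pattern.
func defaultRouteHost(routeName, routeNamespace, ingressDomain string) string {
	return fmt.Sprintf("%s.%s.%s", routeName, routeNamespace, ingressDomain)
}

func main() {
	// e.g. "frontend.shop.apps.example.com" when the Ingress domain is "apps.example.com"
	fmt.Println(defaultRouteHost("frontend", "shop", "apps.example.com"))
}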
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
index a60c5f7dc..a09c5fe8e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -6,14 +6,16 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Network holds cluster-wide information about Network. The canonical name is `cluster`
-// TODO this object is an example of a possible grouping and is subject to change or removal
+// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
+// Please view network.spec for an explanation on what applies when configuring this resource.
type Network struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration.
+ // As a general rule, this SHOULD NOT be read directly. Instead, you should
+ // consume the NetworkStatus, as it indicates the currently deployed configuration.
+ // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
// +kubebuilder:validation:Required
// +required
Spec NetworkSpec `json:"spec"`
@@ -25,14 +27,15 @@ type Network struct {
// NetworkSpec is the desired network configuration.
// As a general rule, this SHOULD NOT be read directly. Instead, you should
// consume the NetworkStatus, as it indicates the currently deployed configuration.
-// Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after
-// installation is not supported.
+// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
type NetworkSpec struct {
// IP address pool to use for pod IPs.
+ // This field is immutable after installation.
ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
// IP address pool for services.
// Currently, we only support a single entry here.
+ // This field is immutable after installation.
ServiceNetwork []string `json:"serviceNetwork"`
// NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
@@ -40,10 +43,12 @@ type NetworkSpec struct {
// or else no networking will be installed.
// Currently supported values are:
// - OpenShiftSDN
+ // This field is immutable after installation.
NetworkType string `json:"networkType"`
// externalIP defines configuration for controllers that
- // affect Service.ExternalIP
+ // affect Service.ExternalIP. If nil, then ExternalIP is
+ // not allowed to be set.
// +optional
ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
}
@@ -71,6 +76,7 @@ type ClusterNetworkEntry struct {
CIDR string `json:"cidr"`
// The size (prefix) of block to allocate to each node.
+ // +kubebuilder:validation:Minimum=0
HostPrefix uint32 `json:"hostPrefix"`
}
@@ -78,8 +84,7 @@ type ClusterNetworkEntry struct {
// of a Service resource.
type ExternalIPConfig struct {
// policy is a set of restrictions applied to the ExternalIP field.
- // If nil, any value is allowed for an ExternalIP. If the empty/zero
- // policy is supplied, then ExternalIP is not allowed to be set.
+ // If nil or empty, then ExternalIP is not allowed to be set.
// +optional
Policy *ExternalIPPolicy `json:"policy,omitempty"`
@@ -111,7 +116,7 @@ type ExternalIPPolicy struct {
type NetworkList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Network `json:"items"`
+
+ Items []Network `json:"items"`
}
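An illustrative NetworkSpec with the immutable fields called out above; the CIDRs and plugin name are examples only.

package main

import configv1 "github.com/openshift/api/config/v1"

func main() {
	spec := configv1.NetworkSpec{
		ClusterNetwork: []configv1.ClusterNetworkEntry{
			{CIDR: "10.128.0.0/14", HostPrefix: 23}, // pod IPs; a /23 block allocated per node
		},
		ServiceNetwork: []string{"172.30.0.0/16"}, // only a single entry is supported
		NetworkType:    "OpenShiftSDN",            // must name a plugin that will actually be installed
	}
	_ = spec
}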
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
index cf821f9e3..1d998bf37 100644
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -6,10 +6,19 @@ import (
// OperatorHubSpec defines the desired state of OperatorHub
type OperatorHubSpec struct {
+ // disableAllDefaultSources allows you to disable all the default hub
+ // sources. If this is true, a specific entry in sources can be used to
+ // enable a default source. If this is false, a specific entry in
+ // sources can be used to disable or enable a default source.
+ // +optional
+ DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
// sources is the list of default hub sources and their configuration.
- // If the list is empty, it indicates that the default hub sources are
- // enabled on the cluster. The list of default hub sources and their
- // current state will always be reflected in the status block.
+ // If the list is empty, it implies that the default hub sources are
+ // enabled on the cluster unless disableAllDefaultSources is true.
+ // If disableAllDefaultSources is true and sources is not empty,
+ // the configuration present in sources will take precedence. The list of
+ // default hub sources and their current state will always be reflected in
+ // the status block.
// +optional
Sources []HubSource `json:"sources,omitempty"`
}
@@ -61,9 +70,9 @@ type HubSource struct {
// HubSourceStatus is used to reflect the current state of applying the
// configuration to a default source
type HubSourceStatus struct {
- HubSource
+ HubSource `json:",omitempty"`
// status indicates success or failure in applying the configuration
- Status string `json:"status"`
+ Status string `json:"status,omitempty"`
// message provides more information regarding failures
Message string `json:"message,omitempty"`
}
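A sketch of the precedence rule above: disable all default sources, then re-enable a single one. HubSource is assumed here to carry Name and Disabled fields, which this hunk does not show.

package main

import configv1 "github.com/openshift/api/config/v1"

func main() {
	spec := configv1.OperatorHubSpec{
		DisableAllDefaultSources: true,
		Sources: []configv1.HubSource{
			// Assumed fields; the source name is illustrative.
			{Name: "community-operators", Disabled: false},
		},
	}
	_ = spec
}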
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
index 61152a6f7..244ce3ef8 100644
--- a/vendor/github.com/openshift/api/config/v1/types_project.go
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Project holds cluster-wide information about Project. The canonical name is `cluster`
type Project struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -49,7 +48,7 @@ type ProjectStatus struct {
type ProjectList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Project `json:"items"`
+
+ Items []Project `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
index 1413a48ca..93f4c487e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -12,6 +12,7 @@ import (
type Proxy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
+
// Spec holds user-settable values for the proxy configuration
// +kubebuilder:validation:Required
// +required
@@ -83,7 +84,7 @@ type ProxyStatus struct {
type ProxyList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Proxy `json:"items"`
+
+ Items []Proxy `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
index 9b8fa3a52..d5bf0c362 100644
--- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -9,8 +9,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
// and influence its placement decisions. The canonical name for this config is `cluster`.
type Scheduler struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -69,7 +68,7 @@ type SchedulerStatus struct {
type SchedulerList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Scheduler `json:"items"`
+
+ Items []Scheduler `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
new file mode 100644
index 000000000..ea788dc16
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
@@ -0,0 +1,260 @@
+package v1
+
+// TLSSecurityProfile defines the schema for a TLS security profile. This object
+// is used by operators to apply TLS security settings to operands.
+// +union
+type TLSSecurityProfile struct {
+ // type is one of Old, Intermediate, Modern or Custom. Custom provides
+ // the ability to specify individual TLS security profile parameters.
+ // Old, Intermediate and Modern are TLS security profiles based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ //
+ // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
+ // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
+ // reduced.
+ //
+ // Note that the Modern profile is currently not supported because it is not
+ // yet well adopted by common software libraries.
+ //
+ // +unionDiscriminator
+ // +optional
+ Type TLSProfileType `json:"type"`
+ // old is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - DHE-RSA-AES128-GCM-SHA256
+ // - DHE-RSA-AES256-GCM-SHA384
+ // - DHE-RSA-CHACHA20-POLY1305
+ // - ECDHE-ECDSA-AES128-SHA256
+ // - ECDHE-RSA-AES128-SHA256
+ // - ECDHE-ECDSA-AES128-SHA
+ // - ECDHE-RSA-AES128-SHA
+ // - ECDHE-ECDSA-AES256-SHA384
+ // - ECDHE-RSA-AES256-SHA384
+ // - ECDHE-ECDSA-AES256-SHA
+ // - ECDHE-RSA-AES256-SHA
+ // - DHE-RSA-AES128-SHA256
+ // - DHE-RSA-AES256-SHA256
+ // - AES128-GCM-SHA256
+ // - AES256-GCM-SHA384
+ // - AES128-SHA256
+ // - AES256-SHA256
+ // - AES128-SHA
+ // - AES256-SHA
+ // - DES-CBC3-SHA
+ // minTLSVersion: TLSv1.0
+ //
+ // +optional
+ // +nullable
+ Old *OldTLSProfile `json:"old,omitempty"`
+ // intermediate is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - DHE-RSA-AES128-GCM-SHA256
+ // - DHE-RSA-AES256-GCM-SHA384
+ // minTLSVersion: TLSv1.2
+ //
+ // +optional
+ // +nullable
+ Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
+ // modern is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // minTLSVersion: TLSv1.3
+ //
+ // NOTE: Currently unsupported.
+ //
+ // +optional
+ // +nullable
+ Modern *ModernTLSProfile `json:"modern,omitempty"`
+ // custom is a user-defined TLS security profile. Be extremely careful using a custom
+ // profile as invalid configurations can be catastrophic. An example custom profile
+ // looks like this:
+ //
+ // ciphers:
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // minTLSVersion: TLSv1.1
+ //
+ // +optional
+ // +nullable
+ Custom *CustomTLSProfile `json:"custom,omitempty"`
+}
+
+// OldTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+type OldTLSProfile struct{}
+
+// IntermediateTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
+type IntermediateTLSProfile struct{}
+
+// ModernTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+type ModernTLSProfile struct{}
+
+// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
+// using a custom TLS profile as invalid configurations can be catastrophic.
+type CustomTLSProfile struct {
+ TLSProfileSpec `json:",inline"`
+}
+
+// TLSProfileType defines a TLS security profile type.
+type TLSProfileType string
+
+const (
+ // Old is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ TLSProfileOldType TLSProfileType = "Old"
+ // Intermediate is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
+ TLSProfileIntermediateType TLSProfileType = "Intermediate"
+ // Modern is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ TLSProfileModernType TLSProfileType = "Modern"
+ // Custom is a TLS security profile that allows for user-defined parameters.
+ TLSProfileCustomType TLSProfileType = "Custom"
+)
+
+// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
+type TLSProfileSpec struct {
+ // ciphers is used to specify the cipher algorithms that are negotiated
+ // during the TLS handshake. Operators may remove entries their operands
+ // do not support. For example, to use DES-CBC3-SHA (yaml):
+ //
+ // ciphers:
+ // - DES-CBC3-SHA
+ //
+ Ciphers []string `json:"ciphers"`
+ // minTLSVersion is used to specify the minimal version of the TLS protocol
+ // that is negotiated during the TLS handshake. For example, to use TLS
+ // versions 1.1, 1.2 and 1.3 (yaml):
+ //
+ // minTLSVersion: TLSv1.1
+ //
+ // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
+ //
+ MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
+}
+
+// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
+// Protocol versions are based on the following most common TLS configurations:
+//
+// https://ssl-config.mozilla.org/
+//
+// Note that SSLv3.0 is not a supported protocol version due to well known
+// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
+type TLSProtocolVersion string
+
+const (
+ // VersionTLSv10 is version 1.0 of the TLS security protocol.
+ VersionTLS10 TLSProtocolVersion = "VersionTLS10"
+ // VersionTLSv11 is version 1.1 of the TLS security protocol.
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11"
+ // VersionTLSv12 is version 1.2 of the TLS security protocol.
+ VersionTLS12 TLSProtocolVersion = "VersionTLS12"
+ // VersionTLSv13 is version 1.3 of the TLS security protocol.
+ VersionTLS13 TLSProtocolVersion = "VersionTLS13"
+)
+
+// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
+//
+// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
+// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
+// just be sure to whitelist only and everything will be ok.
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
+ TLSProfileOldType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ "DHE-RSA-CHACHA20-POLY1305",
+ "ECDHE-ECDSA-AES128-SHA256",
+ "ECDHE-RSA-AES128-SHA256",
+ "ECDHE-ECDSA-AES128-SHA",
+ "ECDHE-RSA-AES128-SHA",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDHE-ECDSA-AES256-SHA",
+ "ECDHE-RSA-AES256-SHA",
+ "DHE-RSA-AES128-SHA256",
+ "DHE-RSA-AES256-SHA256",
+ "AES128-GCM-SHA256",
+ "AES256-GCM-SHA384",
+ "AES128-SHA256",
+ "AES256-SHA256",
+ "AES128-SHA",
+ "AES256-SHA",
+ "DES-CBC3-SHA",
+ },
+ MinTLSVersion: VersionTLS10,
+ },
+ TLSProfileIntermediateType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ },
+ MinTLSVersion: VersionTLS12,
+ },
+ TLSProfileModernType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ },
+ MinTLSVersion: VersionTLS13,
+ },
+}
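A sketch of resolving an effective TLSProfileSpec from the map above, falling back to Intermediate for unknown types; the fallback choice is illustrative, not mandated by the API.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func resolveProfile(p *configv1.TLSSecurityProfile) configv1.TLSProfileSpec {
	// Custom profiles carry their own spec inline.
	if p != nil && p.Type == configv1.TLSProfileCustomType && p.Custom != nil {
		return p.Custom.TLSProfileSpec
	}
	// Old/Intermediate/Modern resolve through the TLSProfiles map defined above.
	if p != nil {
		if spec, ok := configv1.TLSProfiles[p.Type]; ok {
			return *spec
		}
	}
	// Illustrative fallback when nothing (or an unknown type) is set.
	return *configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
}

func main() {
	spec := resolveProfile(&configv1.TLSSecurityProfile{Type: configv1.TLSProfileOldType})
	fmt.Println(spec.MinTLSVersion, "with", len(spec.Ciphers), "ciphers")
}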
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 3d44627f9..37888a939 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -38,10 +38,26 @@ func (in *APIServer) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption.
+func (in *APIServerEncryption) DeepCopy() *APIServerEncryption {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerEncryption)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIServerList) DeepCopyInto(out *APIServerList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]APIServer, len(*in))
@@ -125,6 +141,12 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ out.Encryption = in.Encryption
+ if in.TLSSecurityProfile != nil {
+ in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
+ *out = new(TLSSecurityProfile)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -269,7 +291,7 @@ func (in *Authentication) DeepCopyObject() runtime.Object {
func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Authentication, len(*in))
@@ -456,7 +478,7 @@ func (in *BuildDefaults) DeepCopy() *BuildDefaults {
func (in *BuildList) DeepCopyInto(out *BuildList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Build, len(*in))
@@ -619,7 +641,7 @@ func (in *ClusterOperator) DeepCopyObject() runtime.Object {
func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterOperator, len(*in))
@@ -747,7 +769,7 @@ func (in *ClusterVersion) DeepCopyObject() runtime.Object {
func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterVersion, len(*in))
@@ -934,7 +956,7 @@ func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Console, len(*in))
@@ -1023,6 +1045,23 @@ func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
+ *out = *in
+ in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
+func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNS) DeepCopyInto(out *DNS) {
*out = *in
out.TypeMeta = in.TypeMeta
@@ -1054,7 +1093,7 @@ func (in *DNS) DeepCopyObject() runtime.Object {
func (in *DNSList) DeepCopyInto(out *DNSList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DNS, len(*in))
@@ -1329,7 +1368,7 @@ func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FeatureGate, len(*in))
@@ -1730,7 +1769,7 @@ func (in *ImageLabel) DeepCopy() *ImageLabel {
func (in *ImageList) DeepCopyInto(out *ImageList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Image, len(*in))
@@ -1840,7 +1879,7 @@ func (in *Infrastructure) DeepCopyObject() runtime.Object {
func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Infrastructure, len(*in))
@@ -1939,7 +1978,7 @@ func (in *Ingress) DeepCopyObject() runtime.Object {
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
@@ -2001,6 +2040,22 @@ func (in *IngressStatus) DeepCopy() *IngressStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile.
+func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(IntermediateTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
*out = *in
out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
@@ -2109,6 +2164,22 @@ func (in *LeaderElection) DeepCopy() *LeaderElection {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
+func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(ModernTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
*out = *in
if in.Names != nil {
@@ -2162,7 +2233,7 @@ func (in *Network) DeepCopyObject() runtime.Object {
func (in *NetworkList) DeepCopyInto(out *NetworkList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Network, len(*in))
@@ -2280,7 +2351,7 @@ func (in *OAuth) DeepCopyObject() runtime.Object {
func (in *OAuthList) DeepCopyInto(out *OAuthList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OAuth, len(*in))
@@ -2405,6 +2476,22 @@ func (in *ObjectReference) DeepCopy() *ObjectReference {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
+func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(OldTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
*out = *in
if in.PreferredUsername != nil {
@@ -2530,7 +2617,7 @@ func (in *OperatorHub) DeepCopyObject() runtime.Object {
func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OperatorHub, len(*in))
@@ -2602,6 +2689,22 @@ func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
+func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OvirtPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
*out = *in
if in.AWS != nil {
@@ -2629,6 +2732,11 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
*out = new(OpenStackPlatformStatus)
**out = **in
}
+ if in.Ovirt != nil {
+ in, out := &in.Ovirt, &out.Ovirt
+ *out = new(OvirtPlatformStatus)
+ **out = **in
+ }
return
}
@@ -2674,7 +2782,7 @@ func (in *Project) DeepCopyObject() runtime.Object {
func (in *ProjectList) DeepCopyInto(out *ProjectList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Project, len(*in))
@@ -2768,7 +2876,7 @@ func (in *Proxy) DeepCopyObject() runtime.Object {
func (in *ProxyList) DeepCopyInto(out *ProxyList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Proxy, len(*in))
@@ -2973,7 +3081,7 @@ func (in *Scheduler) DeepCopyObject() runtime.Object {
func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Scheduler, len(*in))
@@ -3114,6 +3222,63 @@ func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
+ *out = *in
+ if in.Ciphers != nil {
+ in, out := &in.Ciphers, &out.Ciphers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
+func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
+ *out = *in
+ if in.Old != nil {
+ in, out := &in.Old, &out.Old
+ *out = new(OldTLSProfile)
+ **out = **in
+ }
+ if in.Intermediate != nil {
+ in, out := &in.Intermediate, &out.Intermediate
+ *out = new(IntermediateTLSProfile)
+ **out = **in
+ }
+ if in.Modern != nil {
+ in, out := &in.Modern, &out.Modern
+ *out = new(ModernTLSProfile)
+ **out = **in
+ }
+ if in.Custom != nil {
+ in, out := &in.Custom, &out.Custom
+ *out = new(CustomTLSProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
+func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSSecurityProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
*out = *in
return
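The deepcopy hunks above do two things: they add the boilerplate DeepCopy/DeepCopyInto helpers for the newly vendored TLS profile and OvirtPlatformStatus types, and they switch the generated list types from plain `out.ListMeta = in.ListMeta` assignment to `in.ListMeta.DeepCopyInto(&out.ListMeta)`, presumably because metav1.ListMeta now carries pointer fields (such as RemainingItemCount) that a struct assignment would alias rather than copy. A minimal sketch of that distinction follows, using toy stand-in types rather than the vendored API:

package main

import "fmt"

// Toy stand-ins for the generated types; all names here are illustrative only.
type ListMeta struct {
	// A pointer field: plain struct assignment would share the pointed-to
	// value between the original and the copy.
	RemainingItemCount *int64
}

func (in *ListMeta) DeepCopyInto(out *ListMeta) {
	*out = *in
	if in.RemainingItemCount != nil {
		v := *in.RemainingItemCount
		out.RemainingItemCount = &v
	}
}

type IngressList struct {
	ListMeta ListMeta
}

func (in *IngressList) DeepCopyInto(out *IngressList) {
	*out = *in
	// The generated code now calls DeepCopyInto here instead of assigning
	// ListMeta directly, so pointer fields are duplicated, not aliased.
	in.ListMeta.DeepCopyInto(&out.ListMeta)
}

func main() {
	n := int64(3)
	src := IngressList{ListMeta: ListMeta{RemainingItemCount: &n}}
	var dst IngressList
	src.DeepCopyInto(&dst)

	*src.ListMeta.RemainingItemCount = 7
	fmt.Println(*dst.ListMeta.RemainingItemCount) // prints 3: the copy is independent
}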
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 6bd877a4f..2d6b19d2d 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -251,6 +251,14 @@ func (APIServer) SwaggerDoc() map[string]string {
return map_APIServer
}
+var map_APIServerEncryption = map[string]string{
+ "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
+}
+
+func (APIServerEncryption) SwaggerDoc() map[string]string {
+ return map_APIServerEncryption
+}
+
var map_APIServerNamedServingCert = map[string]string{
"": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
"names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.",
@@ -273,6 +281,8 @@ var map_APIServerSpec = map[string]string{
"servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
"clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
"additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+ "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
+ "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.",
}
func (APIServerSpec) SwaggerDoc() map[string]string {
@@ -280,24 +290,15 @@ func (APIServerSpec) SwaggerDoc() map[string]string {
}
var map_Authentication = map[string]string{
- "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Authentication) SwaggerDoc() map[string]string {
return map_Authentication
}
-var map_AuthenticationList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (AuthenticationList) SwaggerDoc() map[string]string {
- return map_AuthenticationList
-}
-
var map_AuthenticationSpec = map[string]string{
"type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
"oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
@@ -326,7 +327,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
}
var map_Build = map[string]string{
- "": "Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`",
+ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"",
"spec": "Spec holds user-settable values for the build controller configuration",
}
@@ -346,14 +347,6 @@ func (BuildDefaults) SwaggerDoc() map[string]string {
return map_BuildDefaults
}
-var map_BuildList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (BuildList) SwaggerDoc() map[string]string {
- return map_BuildList
-}
-
var map_BuildOverrides = map[string]string{
"imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
"nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
@@ -365,7 +358,7 @@ func (BuildOverrides) SwaggerDoc() map[string]string {
}
var map_BuildSpec = map[string]string{
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.",
+ "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
"buildDefaults": "BuildDefaults controls the default information for Builds",
"buildOverrides": "BuildOverrides controls override settings for builds",
}
@@ -385,8 +378,8 @@ func (ImageLabel) SwaggerDoc() map[string]string {
var map_ClusterOperator = map[string]string{
"": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.",
- "spec": "spec hold the intent of how this operator should behave.",
- "status": "status holds the information about the state of an operator. It is consistent with status information across the kube ecosystem.",
+ "spec": "spec holds configuration that could apply to any operator.",
+ "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
}
func (ClusterOperator) SwaggerDoc() map[string]string {
@@ -411,8 +404,8 @@ func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
var map_ClusterOperatorStatus = map[string]string{
"": "ClusterOperatorStatus provides information about the status of the operator.",
- "conditions": "conditions describes the state of the operator's reconciliation functionality.",
- "versions": "versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple entries in the array. If an operator is Available, it must have at least one entry. You must report the version of the operator itself with the name \"operator\".",
+ "conditions": "conditions describes the state of the operator's managed and monitored components.",
+ "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
"relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
"extension": "extension contains any additional status information specific to the operator which owns this status object.",
}
@@ -422,11 +415,11 @@ func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
}
var map_ClusterOperatorStatusCondition = map[string]string{
- "": "ClusterOperatorStatusCondition represents the state of the operator's reconciliation functionality.",
- "type": "type specifies the state of the operator's reconciliation functionality.",
+ "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
+ "type": "type specifies the aspect reported by this condition.",
"status": "status of the condition, one of True, False, Unknown.",
- "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status object.",
- "reason": "reason is the reason for the condition's last transition. Reasons are CamelCase",
+ "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
+ "reason": "reason is the CamelCase reason for the condition's current status.",
"message": "message provides additional information about the current condition. This is only to be consumed by humans.",
}
@@ -448,7 +441,7 @@ func (ObjectReference) SwaggerDoc() map[string]string {
var map_OperandVersion = map[string]string{
"name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
- "version": "version indicates which version of a particular operand is currently being manage. It must always match the Available condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
+ "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
}
func (OperandVersion) SwaggerDoc() map[string]string {
@@ -490,7 +483,7 @@ var map_ClusterVersionStatus = map[string]string{
"": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
"desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
"history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
- "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent from a previous version.",
+ "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
"versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
"conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
"availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
@@ -539,10 +532,9 @@ func (UpdateHistory) SwaggerDoc() map[string]string {
}
var map_Console = map[string]string{
- "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Console) SwaggerDoc() map[string]string {
@@ -558,14 +550,6 @@ func (ConsoleAuthentication) SwaggerDoc() map[string]string {
return map_ConsoleAuthentication
}
-var map_ConsoleList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ConsoleList) SwaggerDoc() map[string]string {
- return map_ConsoleList
-}
-
var map_ConsoleSpec = map[string]string{
"": "ConsoleSpec is the specification of the desired behavior of the Console.",
}
@@ -584,24 +568,15 @@ func (ConsoleStatus) SwaggerDoc() map[string]string {
}
var map_DNS = map[string]string{
- "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (DNS) SwaggerDoc() map[string]string {
return map_DNS
}
-var map_DNSList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (DNSList) SwaggerDoc() map[string]string {
- return map_DNSList
-}
-
var map_DNSSpec = map[string]string{
"baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
"publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
@@ -632,24 +607,15 @@ func (CustomFeatureGates) SwaggerDoc() map[string]string {
}
var map_FeatureGate = map[string]string{
- "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (FeatureGate) SwaggerDoc() map[string]string {
return map_FeatureGate
}
-var map_FeatureGateList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (FeatureGateList) SwaggerDoc() map[string]string {
- return map_FeatureGateList
-}
-
var map_FeatureGateSelection = map[string]string{
"featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
"customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.",
@@ -660,29 +626,20 @@ func (FeatureGateSelection) SwaggerDoc() map[string]string {
}
var map_Image = map[string]string{
- "": "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to blacklist/whitelist registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Image) SwaggerDoc() map[string]string {
return map_Image
}
-var map_ImageList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ImageList) SwaggerDoc() map[string]string {
- return map_ImageList
-}
-
var map_ImageSpec = map[string]string{
- "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
- "registrySources": "RegistrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
+ "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
+ "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
}
func (ImageSpec) SwaggerDoc() map[string]string {
@@ -690,7 +647,7 @@ func (ImageSpec) SwaggerDoc() map[string]string {
}
var map_ImageStatus = map[string]string{
- "internalRegistryHostname": "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
+ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
}
@@ -700,8 +657,8 @@ func (ImageStatus) SwaggerDoc() map[string]string {
var map_RegistryLocation = map[string]string{
"": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
- "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
- "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
+ "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
+ "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
}
func (RegistryLocation) SwaggerDoc() map[string]string {
@@ -710,9 +667,9 @@ func (RegistryLocation) SwaggerDoc() map[string]string {
var map_RegistrySources = map[string]string{
"": "RegistrySources holds cluster-wide information about how to handle the registries config.",
- "insecureRegistries": "InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.",
- "blockedRegistries": "BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
- "allowedRegistries": "AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.",
+ "blockedRegistries": "blockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "allowedRegistries": "allowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
}
func (RegistrySources) SwaggerDoc() map[string]string {
@@ -729,8 +686,9 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string {
}
var map_AzurePlatformStatus = map[string]string{
- "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
- "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+ "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
+ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+ "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.",
}
func (AzurePlatformStatus) SwaggerDoc() map[string]string {
@@ -738,7 +696,7 @@ func (AzurePlatformStatus) SwaggerDoc() map[string]string {
}
var map_BareMetalPlatformStatus = map[string]string{
- "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.",
+ "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md",
"apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
"ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
"nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
@@ -759,10 +717,9 @@ func (GCPPlatformStatus) SwaggerDoc() map[string]string {
}
var map_Infrastructure = map[string]string{
- "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Infrastructure) SwaggerDoc() map[string]string {
@@ -770,8 +727,7 @@ func (Infrastructure) SwaggerDoc() map[string]string {
}
var map_InfrastructureList = map[string]string{
- "": "InfrastructureList is",
- "metadata": "Standard object's metadata.",
+ "": "InfrastructureList is",
}
func (InfrastructureList) SwaggerDoc() map[string]string {
@@ -813,6 +769,17 @@ func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
return map_OpenStackPlatformStatus
}
+var map_OvirtPlatformStatus = map[string]string{
+ "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for oVirt deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+}
+
+func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
+ return map_OvirtPlatformStatus
+}
+
var map_PlatformStatus = map[string]string{
"": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
@@ -821,6 +788,7 @@ var map_PlatformStatus = map[string]string{
"gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
"baremetal": "BareMetal contains settings specific to the BareMetal platform.",
"openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
}
func (PlatformStatus) SwaggerDoc() map[string]string {
@@ -828,26 +796,17 @@ func (PlatformStatus) SwaggerDoc() map[string]string {
}
var map_Ingress = map[string]string{
- "": "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Ingress) SwaggerDoc() map[string]string {
return map_Ingress
}
-var map_IngressList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (IngressList) SwaggerDoc() map[string]string {
- return map_IngressList
-}
-
var map_IngressSpec = map[string]string{
- "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".",
+ "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
}
func (IngressSpec) SwaggerDoc() map[string]string {
@@ -866,7 +825,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
var map_ExternalIPConfig = map[string]string{
"": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
- "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil, any value is allowed for an ExternalIP. If the empty/zero policy is supplied, then ExternalIP is not allowed to be set.",
+ "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
"autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
}
@@ -885,30 +844,21 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string {
}
var map_Network = map[string]string{
- "": "Network holds cluster-wide information about Network. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration.",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.",
+ "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Network) SwaggerDoc() map[string]string {
return map_Network
}
-var map_NetworkList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (NetworkList) SwaggerDoc() map[string]string {
- return map_NetworkList
-}
-
var map_NetworkSpec = map[string]string{
- "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after installation is not supported.",
- "clusterNetwork": "IP address pool to use for pod IPs.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
- "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN",
- "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP",
+ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
+ "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.",
+ "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
}
func (NetworkSpec) SwaggerDoc() map[string]string {
@@ -1183,8 +1133,9 @@ func (OperatorHubList) SwaggerDoc() map[string]string {
}
var map_OperatorHubSpec = map[string]string{
- "": "OperatorHubSpec defines the desired state of OperatorHub",
- "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it indicates that the default hub sources are enabled on the cluster. The list of default hub sources and their current state will always be reflected in the status block.",
+ "": "OperatorHubSpec defines the desired state of OperatorHub",
+ "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.",
+ "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.",
}
func (OperatorHubSpec) SwaggerDoc() map[string]string {
@@ -1201,24 +1152,15 @@ func (OperatorHubStatus) SwaggerDoc() map[string]string {
}
var map_Project = map[string]string{
- "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Project) SwaggerDoc() map[string]string {
return map_Project
}
-var map_ProjectList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ProjectList) SwaggerDoc() map[string]string {
- return map_ProjectList
-}
-
var map_ProjectSpec = map[string]string{
"": "ProjectSpec holds the project creation configuration.",
"projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
@@ -1248,14 +1190,6 @@ func (Proxy) SwaggerDoc() map[string]string {
return map_Proxy
}
-var map_ProxyList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ProxyList) SwaggerDoc() map[string]string {
- return map_ProxyList
-}
-
var map_ProxySpec = map[string]string{
"": "ProxySpec contains cluster proxy creation configuration.",
"httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
@@ -1281,24 +1215,15 @@ func (ProxyStatus) SwaggerDoc() map[string]string {
}
var map_Scheduler = map[string]string{
- "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Scheduler) SwaggerDoc() map[string]string {
return map_Scheduler
}
-var map_SchedulerList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (SchedulerList) SwaggerDoc() map[string]string {
- return map_SchedulerList
-}
-
var map_SchedulerSpec = map[string]string{
"policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
"defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
@@ -1309,4 +1234,59 @@ func (SchedulerSpec) SwaggerDoc() map[string]string {
return map_SchedulerSpec
}
+var map_CustomTLSProfile = map[string]string{
+ "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.",
+}
+
+func (CustomTLSProfile) SwaggerDoc() map[string]string {
+ return map_CustomTLSProfile
+}
+
+var map_IntermediateTLSProfile = map[string]string{
+ "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29",
+}
+
+func (IntermediateTLSProfile) SwaggerDoc() map[string]string {
+ return map_IntermediateTLSProfile
+}
+
+var map_ModernTLSProfile = map[string]string{
+ "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility",
+}
+
+func (ModernTLSProfile) SwaggerDoc() map[string]string {
+ return map_ModernTLSProfile
+}
+
+var map_OldTLSProfile = map[string]string{
+ "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility",
+}
+
+func (OldTLSProfile) SwaggerDoc() map[string]string {
+ return map_OldTLSProfile
+}
+
+var map_TLSProfileSpec = map[string]string{
+ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
+ "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA",
+ "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12",
+}
+
+func (TLSProfileSpec) SwaggerDoc() map[string]string {
+ return map_TLSProfileSpec
+}
+
+var map_TLSSecurityProfile = map[string]string{
+ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.",
+ "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.",
+ "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0",
+ "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2",
+ "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.",
+ "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1",
+}
+
+func (TLSSecurityProfile) SwaggerDoc() map[string]string {
+ return map_TLSSecurityProfile
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
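The swagger doc hunks above register description strings for the new encryption and TLS security profile fields and drop the per-List entries that only repeated "Standard object's metadata." Every generated type exposes its documentation through a SwaggerDoc() map[string]string method whose empty-string key describes the type itself. A small sketch of how such maps are typically consumed, again with a toy stand-in type rather than the vendored package:

package main

import "fmt"

// Toy stand-in for a generated config type; the real maps (map_TLSProfileSpec
// and friends) live in the vendored openshift/api package shown above.
type TLSProfileSpec struct{}

func (TLSProfileSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
		"ciphers":       "ciphers is used to specify the cipher algorithms negotiated during the TLS handshake.",
		"minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol.",
	}
}

// swaggerDocumented matches the convention used by all the generated types.
type swaggerDocumented interface {
	SwaggerDoc() map[string]string
}

// describe prints the type-level doc (the "" key) followed by per-field docs.
func describe(t swaggerDocumented) {
	docs := t.SwaggerDoc()
	fmt.Println(docs[""])
	for field, doc := range docs {
		if field == "" {
			continue
		}
		fmt.Printf("  %s: %s\n", field, doc)
	}
}

func main() {
	describe(TLSProfileSpec{})
}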
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fa01fbeec..8fa756fbb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -78,6 +78,8 @@ github.com/containers/buildah/util
# github.com/containers/common v0.0.7
github.com/containers/common/pkg/cgroups
github.com/containers/common/pkg/unshare
+# github.com/containers/conmon v2.0.9+incompatible
+github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.1.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -406,7 +408,7 @@ github.com/opencontainers/runtime-tools/validate
# github.com/opencontainers/selinux v1.3.0
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
-# github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
+# github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
github.com/openshift/api/config/v1
# github.com/openshift/imagebuilder v1.1.1
github.com/openshift/imagebuilder